/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef NN_API_SHIM_H0
#define NN_API_SHIM_H0

#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// helpers

#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__);
#define LOAD_FUNCTION(name) \
  static name##_fn fn = reinterpret_cast<name##_fn>(loadFunction(#name));
#define EXECUTE_FUNCTION(...) \
  if (fn != nullptr) {        \
    fn(__VA_ARGS__);          \
  }
#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0;
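
// For illustration, a shim wrapper built from these macros follows this
// pattern (a sketch; ANeuralNetworksModel_create is one of the NNAPI entry
// points this header wraps):
//
//   inline int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
//     LOAD_FUNCTION(ANeuralNetworksModel_create);
//     EXECUTE_FUNCTION_RETURN(model);
//   }
//
// LOAD_FUNCTION resolves the symbol once per wrapper (via the function-local
// static), and EXECUTE_FUNCTION_RETURN forwards the arguments to the loaded
// function, returning 0 when the symbol could not be resolved.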

inline void* loadLibrary(const char* name) {
  // TODO: Change RTLD_LOCAL? This assumes there can be multiple instances of
  // the NNAPI runtime.
  void* handle = dlopen(name, RTLD_LAZY | RTLD_LOCAL);
  if (handle == nullptr) {
    NNAPI_LOG("nnapi error: unable to open library %s", name);
  }
  return handle;
}

inline void* getLibraryHandle() {
  static void* handle = loadLibrary("libneuralnetworks.so");
  return handle;
}

inline void* loadFunction(const char* name) {
  void* fn = nullptr;
  if (getLibraryHandle() != nullptr) {
    fn = dlsym(getLibraryHandle(), name);
  }
  if (fn == nullptr) {
    NNAPI_LOG("nnapi error: unable to load function %s", name);
  }
  return fn;
}

inline bool NNAPIExists() {
  static bool nnapi_is_available = getLibraryHandle() != nullptr;
  return nnapi_is_available;
}
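
// Illustrative usage: callers can gate all NNAPI work on the runtime check
// above (a sketch, not part of the shim itself):
//
//   if (NNAPIExists()) {
//     // Safe to call the ANeuralNetworks* functions declared below.
//   } else {
//     // Fall back to a CPU code path.
//   }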

// nn api types

/**
 * Operand types.
 *
 * The type of operands that can be added to a model.
 *
 * Although we define many types, most operators accept just a few
 * types.  Most used are ANEURALNETWORKS_TENSOR_FLOAT32,
 * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, and ANEURALNETWORKS_INT32.
 */
enum {
  /** The following entries are used to declare scalars. */

  /** A 32 bit floating point scalar value. */
  ANEURALNETWORKS_FLOAT32 = 0,
  /** A signed 32 bit integer scalar value. */
  ANEURALNETWORKS_INT32 = 1,
  /** An unsigned 32 bit integer scalar value. */
  ANEURALNETWORKS_UINT32 = 2,

  /** The following entries are used to declare tensors. */

  /** A tensor of 32 bit floating point values. */
  ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
  /** A tensor of 32 bit integer values. */
  ANEURALNETWORKS_TENSOR_INT32 = 4,
  /** A tensor of 8 bit integers that represent real numbers.
   *
   * Attached to this tensor are two numbers that can be used to convert
   * the 8 bit integer to the real value and vice versa.  These two numbers are:
   * - scale: a 32 bit floating point value
   * - zero_value: a 32 bit integer
   *
   * The formula is:
   * real_value = (integer_value - zero_value) * scale.
   */
  ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
};
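
// Worked example of the quantization formula above (illustrative values):
// with scale = 0.5 and zero_value = 128, the stored byte 130 represents
// (130 - 128) * 0.5 = 1.0, and the byte 128 represents 0.0.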

/**
 * Operation types.
 *
 * The type of operations that can be added to a model.
 */
enum {
  /** Adds two tensors, element-wise.
   *
   * Takes two input tensors of identical type and compatible dimensions. The
   * output is the sum of both input tensors, optionally modified by an
   * activation function.
   *
   * Two dimensions are compatible when:
   *     1. they are equal, or
   *     2. one of them is 1
   *
   * The size of the output is the maximum size along each dimension of the
   * input operands. It starts with the trailing dimensions, and works its way
   * forward.
   *
   * Example:
   *
   *     input1.dimension = {4, 1, 2}
   *     input2.dimension = {5, 4, 3, 1}
   *     output.dimension = {5, 4, 3, 2}
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: up to 4
   *
   * Inputs:
   * * 0: A tensor.
   * * 1: A tensor of the same type, and compatible dimensions as input0.
   * * 2: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The sum, a tensor of the same type as input0.
   */
  ANEURALNETWORKS_ADD = 0,
  /** Performs a 2-D average pooling operation.
   *
   * The output dimensions are functions of the filter dimensions, stride, and
   * padding.
   *
   * The values in the output tensor are computed as:
   *
   *     output[batch, row, col, channel] =
   *         sum_{i, j}(input[batch, row + i, col + j, channel]) / sum(1)
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   * * 1: An INT32 value, specifying the padding on the left, in the width
   * dimension.
   * * 2: An INT32 value, specifying the padding on the right, in the width
   * dimension.
   * * 3: An INT32 value, specifying the padding on the top, in the height
   * dimension.
   * * 4: An INT32 value, specifying the padding on the bottom, in the height
   * dimension.
   * * 5: An INT32 value, specifying the output stride in the width dimension.
   * * 6: An INT32 value, specifying the output stride in the height
   * dimension.
   * * 7: An INT32 value, specifying the filter width.
   * * 8: An INT32 value, specifying the filter height.
   * * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth].
   */
  ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
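
  // For reference (an assumption based on standard pooling arithmetic, not
  // stated in the documentation above): with explicit padding the output
  // spatial dimensions are
  //
  //   out_width  = (width  + padding_left + padding_right  - filter_width) /
  //                stride_width + 1
  //   out_height = (height + padding_top  + padding_bottom - filter_height) /
  //                stride_height + 1
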
  /** Concatenates the input tensors along the given dimension.
   *
   * The input tensors must have identical type and the same dimensions except
   * the dimension along the concatenation axis.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4
   *
   * Inputs:
   * * 0 ~ n: The list of n input tensors, of shape
   *          [D0, D1, ..., Daxis(i), ..., Dm].
   * * n+1: An INT32 value, specifying the concatenation axis.
   * * n+2: An INT32 value, and has to be one of the {@link FuseCode} values.
   *        Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output, a tensor of the same type as the input tensors.
   *      The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
   */
  ANEURALNETWORKS_CONCATENATION = 2,
  /** Performs a 2-D convolution operation.
   *
   * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
   * batch of images, applying the filter to each window of each image of the
   * appropriate size.
   *
   * The output dimensions are functions of the filter dimensions, stride, and
   * padding.
   *
   * The values in the output tensor are computed as:
   *
   *     output[batch, row, col, channel] =
   *         sum_{i, j, k} (
   *             input[batch, row + i, col + j, k] *
   *             filter[channel, i, j, k]
   *         ) + bias[channel]
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying
   * the input.
   * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width,
   * depth_in], specifying the filter.
   * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
   *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the
   * bias should also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input
   * tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias should
   * be of {@link ANEURALNETWORKS_TENSOR_INT32}.
   * * 3: An INT32 value, specifying the padding on the left, in the width
   * dimension.
   * * 4: An INT32 value, specifying the padding on the right, in the width
   * dimension.
   * * 5: An INT32 value, specifying the padding on the top, in the height
   * dimension.
   * * 6: An INT32 value, specifying the padding on the bottom, in the height
   * dimension.
   * * 7: An INT32 value, specifying the output stride in the width dimension.
   * * 8: An INT32 value, specifying the output stride in the height
   * dimension.
   * * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth_out].
   */
  ANEURALNETWORKS_CONV_2D = 3,
  /** Performs a depthwise 2-D convolution operation.
   *
   * Given an input tensor of shape [batches, height, width, depth_in] and a
   * filter tensor of shape [depth_out, filter_height, filter_width, depth_in]
   * containing in_channels convolutional filters of depth 1, DEPTHWISE_CONV
   * applies a different filter to each input channel (expanding from 1 channel
   * to channel_multiplier channels for each), then concatenates the results
   * together.
   *
   * The output has depth_out = depth_in * depth_multiplier channels.
   * The output dimensions are functions of the filter dimensions, stride, and
   * padding.
   *
   * The values in the output tensor are computed as:
   *
   *     output[b, i, j, k * channel_multiplier + q] =
   *         sum_{di, dj} (
   *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
   *             filter[di, dj, k, q]
   *         )
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying
   * the input.
   * * 1: A 4-D tensor, of shape [depth_out, filter_height, filter_width,
   * depth_in], specifying the filter.
   * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
   *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the
   * bias should also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input
   * tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias should
   * be of {@link ANEURALNETWORKS_TENSOR_INT32}.
   * * 3: An INT32 value, specifying the padding on the left, in the width
   * dimension.
   * * 4: An INT32 value, specifying the padding on the right, in the width
   * dimension.
   * * 5: An INT32 value, specifying the padding on the top, in the height
   * dimension.
   * * 6: An INT32 value, specifying the padding on the bottom, in the height
   * dimension.
   * * 7: An INT32 value, specifying the output stride in the width dimension.
   * * 8: An INT32 value, specifying the output stride in the height
   * dimension.
   * * 9: An INT32 value, specifying the depthwise multiplier.
   * * 10: An INT32 value, and has to be one of the {@link FuseCode} values.
   *       Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth_out].
   */
  ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
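
  // Worked example of the channel expansion described above (illustrative
  // numbers): with depth_in = 8 input channels and a depthwise multiplier
  // of 2, each input channel produces 2 output channels, so
  // depth_out = 8 * 2 = 16.
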
  /** Rearranges data from depth into blocks of spatial data.
   *
   * More specifically, this op outputs a copy of the input tensor where values
   * from the depth dimension are moved in spatial blocks to the height and
   * width dimensions. The value block_size indicates the input block size and
   * how the data is moved.
   *
   * Chunks of data of size block_size * block_size from depth are rearranged
   * into non-overlapping blocks of size block_size x block_size.
   *
   * The width of the output tensor is input_width * block_size, whereas the
   * height is input_height * block_size. The depth of the input tensor must be
   * divisible by block_size * block_size.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying
   * the input.
   * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
   *      block_size * block_size must be a divisor of the input depth.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batch, height*block_size,
   * width*block_size, depth/(block_size*block_size)].
   */
  ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
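
  // Shape example (illustrative): an input of shape [1, 2, 2, 4] with
  // block_size = 2 produces an output of shape
  // [1, 2*2, 2*2, 4/(2*2)] = [1, 4, 4, 1].
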
  /** Dequantizes the input tensor.
   *
   * The formula is:
   *
   *     output = (input - zero_value) * scale.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4
   *
   * Inputs:
   * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0, but with type
   *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
   */
  ANEURALNETWORKS_DEQUANTIZE = 6,

  /**
   * Looks up items from a given tensor.
   *
   * Each item in the output is a raw copy of the corresponding item in
   * the input values. If the given lookup indices are out of bounds,
   * the op will fail and an error will be reported.
   *
   * Inputs:
   * * 0: Values. An n-D tensor of any type X (where n >= 2). E.g., if n is 2,
   *      then the shape would be [lookup_dimension, values_dimension], where
   *      lookup_dimension corresponds to the indexing dimension in the lookup
   *      table, and values_dimension to the contents.
   * * 1: Lookups. A 1-D tensor of type T, of shape [lookup_size], where
   *      lookup_size is the number of elements to look for, and each entry
   *      corresponds to the first dimension of the values tensor.
   *
   * Output:
   * * 0: An n-D tensor of type X and the same rank and shape as the values
   *      tensor, except for the first dimension which has size lookup_size.
   */
  ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
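
  // Shape example (illustrative): with Values of shape [3, 4] and Lookups
  // of shape [2] containing {2, 0}, the output has shape [2, 4] and holds
  // rows 2 and 0 of the values tensor, in that order.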

  /** Computes element-wise floor() on the input tensor.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: up to 4
   *
   * Inputs:
   * * 0: A tensor.
   *
   * Outputs:
   * * 0: The output, a tensor of the same type and dimensions as input0.
   */
  ANEURALNETWORKS_FLOOR = 8,
  /** Denotes a fully (densely) connected layer, which connects all elements in
   * the input tensor with each element in the output tensor.
   *
   * This layer implements the operation:
   *
   *     outputs = activation(inputs * weights + bias)
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input. If rank is greater than 2, then it
   * gets flattened to a 2-D Tensor. The 2-D Tensor is handled as if dimensions
   * corresponded to shape [batch_size, input_size], where batch_size
   * corresponds to the batching dimension, and input_size is the size of the
   * input.
   * * 1: A 2-D tensor, specifying the weights, of shape [num_units,
   * input_size], where "num_units" corresponds to the number of output nodes.
   * * 2: A 1-D tensor, of shape [num_units], specifying the bias.
   *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} type, the
   * bias should also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input
   * tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} type, the bias should
   * be of {@link ANEURALNETWORKS_TENSOR_INT32}.
   * * 3: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output tensor, of shape [batch_size, num_units].
   */
  ANEURALNETWORKS_FULLY_CONNECTED = 9,
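
  // Shape example (illustrative): an input of shape [2, 5], weights of shape
  // [3, 5] (num_units = 3, input_size = 5) and a bias of shape [3] produce
  // an output of shape [2, 3].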

  /**
   * Looks up values of a hash table with given keys.
   *
   * Inputs:
   * * 0: Lookups. A 1-D int32 tensor with shape [ k ].
   * * 1: Keys. A 1-D int32 tensor with shape [ n ], *MUST* be sorted in
   *      ascending order.
   * * 2: Values. A tensor with shape [ n ].
   *
   * Outputs:
   * * 0: Output. A tensor with shape [ k ].
   * * 1: Hits. A uint8 tensor with shape [ k ] indicating whether each lookup
   *      hit or missed.
   */
  ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,

  /** Applies L2 normalization along the depth dimension.
   *
   * The values in the output tensor are computed as:
   *
   *     output[batch, row, col, channel] =
   *         input[batch, row, col, channel] /
   *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
   *
   * For input with more dimensions, independently normalizes each 1-D slice
   * along the depth dimension.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth].
   */
  ANEURALNETWORKS_L2_NORMALIZATION = 11,

  /** Performs a 2-D L2 pooling operation.
   *
   * The output dimensions are functions of the filter dimensions, stride, and
   * padding.
   *
   * The values in the output tensor are computed as:
   *
   *     output[batch, row, col, channel] =
   *         sqrt(sum_{i, j} pow(input[batch, row + i, col + j, channel], 2) /
   *              sum(1))
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   * * 1: An INT32 value, specifying the padding on the left, in the width
   * dimension.
   * * 2: An INT32 value, specifying the padding on the right, in the width
   * dimension.
   * * 3: An INT32 value, specifying the padding on the top, in the height
   * dimension.
   * * 4: An INT32 value, specifying the padding on the bottom, in the height
   * dimension.
   * * 5: An INT32 value, specifying the output stride in the width dimension.
   * * 6: An INT32 value, specifying the output stride in the height
   * dimension.
   * * 7: An INT32 value, specifying the filter width.
   * * 8: An INT32 value, specifying the filter height.
   * * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth].
   */
  ANEURALNETWORKS_L2_POOL_2D = 12,
  /** Applies Local Response Normalization along the depth dimension.
   *
   * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
   * last dimension), and each vector is normalized independently. Within a
   * given vector, each component is divided by the weighted, squared sum of
   * inputs within depth_radius.
   *
   * The output is calculated using this formula:
   *
   *     sqr_sum[a, b, c, d] =
   *         sum(pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
   *     output = input / pow((bias + alpha * sqr_sum), beta)
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   * * 1: An INT32 value, specifying the radius of the normalization window.
   * * 2: A FLOAT32 value, specifying the bias, must not be zero.
   * * 3: A FLOAT32 value, specifying the scale factor, alpha.
   * * 4: A FLOAT32 value, specifying the exponent, beta.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
  /** Computes sigmoid activation on the input tensor element-wise.
   *
   * The output is calculated using this formula:
   *
   *     output = 1 / (1 + exp(-input))
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_LOGISTIC = 14,

  /**
   * Projects an input to a bit vector via locality sensitive hashing.
   *
   * Inputs:
   * * 0: Hash functions. Dim.size == 2, DataType: Float.
   *            Tensor[0].Dim[0]: Number of hash functions.
   *            Tensor[0].Dim[1]: Number of seeds per hash function.
   *            Tensor[0].Dim[1] <= 32 in the sparse case.
   *
   * * 1: Input. Dim.size >= 1, no restriction on DataType.
   * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
   *     If not set, each input element is considered to have the same weight of
   *     1.0.
   *     Tensor[1].Dim[0] == Tensor[2].Dim[0]
   * * 3: Type:
   *        Sparse: Value LSHProjectionType_SPARSE(=1).
   *          Computed bit vector is considered to be sparse.
   *          Each output element is an int32 made up of multiple bits computed
   *          from hash functions.
   *
   *        Dense: Value LSHProjectionType_DENSE(=2).
   *          Computed bit vector is considered to be dense. Each output element
   *          represents a bit and can take the value of either 0 or 1.
   *
   * Outputs:
   * * 0: If the projection type is sparse:
   *        Output.Dim == { Tensor[0].Dim[0] }
   *        A tensor of int32 that represents hash signatures.
   *      If the projection type is dense:
   *        Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
   *        A flattened tensor that represents projected bit vectors.
   */
  ANEURALNETWORKS_LSH_PROJECTION = 15,

  /**
   * Long short-term memory unit (LSTM) recurrent network layer.
   *
   * The default non-peephole implementation is based on:
   * http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
   * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
   * Computation, 9(8):1735-1780, 1997.
   *
   * The peephole implementation is based on:
   * https://research.google.com/pubs/archive/43905.pdf
   * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
   * recurrent neural network architectures for large scale acoustic modeling."
   * INTERSPEECH, 2014.
   *
   * The coupling of input and forget gate (CIFG) is based on:
   * http://arxiv.org/pdf/1503.04069.pdf
   * Greff et al. "LSTM: A Search Space Odyssey"
   *
   * The class has the following independently optional inputs:
   * * If the input gate is coupled to the forget gate (CIFG):
   *   input_to_input_weights, recurrent_to_input_weights,
   *   cell_to_input_weights, input_gate_bias.
   * * If no peephole connections: cell_to_input_weights,
   *   cell_to_forget_weights, cell_to_output_weights.
   * * If no projection layer: projection_weights and projection_bias.
   * * If no projection bias: projection_bias.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Inputs:
   * * 0: Input.
   *      A 2-D tensor of type T, of shape [batch_size, input_size], where
   *      batch_size corresponds to the batching dimension, and input_size
   *      is the size of the input.
   * * 1: input_to_input_weights.
   *      A 2-D tensor of type T, of shape [num_units, input_size], where
   *      num_units corresponds to the number of cell units.
   * * 2: input_to_forget_weights.
   *      A 2-D tensor of type T, of shape [num_units, input_size].
   * * 3: input_to_cell_weights.
   *      A 2-D tensor of type T, of shape [num_units, input_size].
   * * 4: input_to_output_weights.
   *      A 2-D tensor of type T, of shape [num_units, input_size].
   * * 5: recurrent_to_input_weights.
   *      A 2-D tensor of type T, of shape [num_units, output_size], where
   *      output_size corresponds to either the number of cell units (i.e.,
   *      num_units), or the second dimension of the projection_weights, if
   *      defined.
   * * 6: recurrent_to_forget_weights.
   *      A 2-D tensor of type T, of shape [num_units, output_size].
   * * 7: recurrent_to_cell_weights.
   *      A 2-D tensor of type T, of shape [num_units, output_size].
   * * 8: recurrent_to_output_weights.
   *      A 2-D tensor of type T, of shape [num_units, output_size].
   * * 9: cell_to_input_weights.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 10: cell_to_forget_weights.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 11: cell_to_output_weights.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 12: input_gate_bias.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 13: forget_gate_bias.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 14: cell_bias.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 15: output_gate_bias.
   *      A 1-D tensor of type T, of shape [num_units].
   * * 16: projection_weights.
   *      A 2-D tensor of type T, of shape [output_size, num_units].
   * * 17: projection_bias.
   *      A 1-D tensor of type T, of shape [output_size].
   *
   * Parameters:
   * * 18: fused_activation_function.
   *      An (optional) ActivationFunctionType indicating the activation
   *      function.
   *      If NONE is specified then it results in a linear activation.
   * * 19: cell_clip.
   *      A clipping threshold for the cell state, such that values are bound
   *      within [-cell_clip, cell_clip]. If set to 0.0 then clipping is
   *      disabled.
   * * 20: proj_clip.
   *      A clipping threshold for the output from the projection layer, such
   *      that values are bound within [-proj_clip, proj_clip]. If set to 0.0
   *      then clipping is disabled.
   *
   * Outputs:
   * * 0: scratch_buffer.
   *      A 3-D tensor of type T, of shape [batch_size, num_cell, 4].
   * * 1: output_state.
   *      A 2-D tensor of type T, of shape [batch_size, output_size].
   * * 2: cell_state.
   *      A 2-D tensor of type T, of shape [batch_size, num_units].
   * * 3: output.
   *      A 2-D tensor of type T, of shape [batch_size, output_size]. This is
   *      effectively the same as the current output_state value.
   */
  ANEURALNETWORKS_LSTM = 16,

  /** Performs a 2-D max pooling operation.
   *
   * The output dimensions are functions of the filter dimensions, stride, and
   * padding.
   *
   * The values in the output tensor are computed as:
   *
   *     output[batch, row, col, channel] =
   *         max_{i, j} (input[batch, row + i, col + j, channel])
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   * * 1: An INT32 value, specifying the padding on the left, in the width
   * dimension.
   * * 2: An INT32 value, specifying the padding on the right, in the width
   * dimension.
   * * 3: An INT32 value, specifying the padding on the top, in the height
   * dimension.
   * * 4: An INT32 value, specifying the padding on the bottom, in the height
   * dimension.
   * * 5: An INT32 value, specifying the output stride in the width dimension.
   * * 6: An INT32 value, specifying the output stride in the height
   * dimension.
   * * 7: An INT32 value, specifying the filter width.
   * * 8: An INT32 value, specifying the filter height.
   * * 9: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each addition.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, out_height, out_width,
   * depth].
   */
  ANEURALNETWORKS_MAX_POOL_2D = 17,

  /** Multiplies two tensors, element-wise.
   *
   * Takes two input tensors of identical type and compatible dimensions. The
   * output is the product of both input tensors, optionally modified by an
   * activation function.
   *
   * Two dimensions are compatible when:
   *     1. they are equal, or
   *     2. one of them is 1
   *
   * The size of the resulting output is the maximum size along each dimension
   * of the input operands. It starts with the trailing dimensions, and works
   * its way forward.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: up to 4
   *
   * Inputs:
   * * 0: A tensor.
   * * 1: A tensor of the same type, and compatible dimensions as input0.
   * * 2: An INT32 value, and has to be one of the {@link FuseCode} values.
   *      Specifies the activation to invoke on the result of each
   *      multiplication.
   *
   * Outputs:
   * * 0: The product, a tensor of the same type as input0.
   */
  ANEURALNETWORKS_MUL = 18,
  /** Computes rectified linear activation on the input tensor element-wise.
   *
   * The output is calculated using this formula:
   *
   *     output = max(0, input)
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_RELU = 19,
  /** Computes rectified linear 1 activation on the input tensor element-wise.
   *
   * The output is calculated using this formula:
   *
   *     output = min(1.f, max(-1.f, input))
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_RELU1 = 20,
  /** Computes rectified linear 6 activation on the input tensor element-wise.
   *
   * The output is calculated using this formula:
   *
   *     output = min(6, max(0, input))
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_RELU6 = 21,
  /** Reshapes a tensor.
   *
   * Given an input tensor, this operation returns a tensor that has the same
   * values as the input tensor, but with a newly specified shape.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the tensor to be reshaped.
   * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}, defining
   * the shape of the output tensor. The number of elements implied by shape
   * must be the same as the number of elements in the input tensor.
   *
   * Outputs:
   * * 0: The output tensor, of shape specified by the input shape.
   */
  ANEURALNETWORKS_RESHAPE = 22,
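
  // Shape example (illustrative): an input of shape [2, 3, 4] (24 elements)
  // reshaped with the 1-D shape tensor {4, 6} yields an output of shape
  // [4, 6], which also holds 4 * 6 = 24 elements.
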
  /** Resizes images to a given size using bilinear interpolation.
   *
   * Resized images will be distorted if their output aspect ratio is not the
   * same as the input aspect ratio.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying the
   * input.
   * * 1: An INT32 value, specifying the output width of the output tensor.
   * * 2: An INT32 value, specifying the output height of the output tensor.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batches, new_height, new_width,
   * depth].
   */
  ANEURALNETWORKS_RESIZE_BILINEAR = 23,

  /**
   * A basic recurrent neural network layer.
   *
   * This layer implements the operation:
   *
   *     outputs = state = activation(inputs * input_weights +
   *                                  state * recurrent_weights + bias)
   *
   * Where:
   * * input_weights is a weight matrix that multiplies the inputs;
   * * recurrent_weights is a weight matrix that multiplies the current
   *   state which itself is the output from the previous time step
   *   computation;
   * * bias is a bias vector (added to each output vector in the batch);
   * * activation is the function passed as the fused_activation_function
   *   argument (if not NONE).
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Inputs:
   * * 0: input.
   *      A 2-D tensor of type T, of shape [batch_size, input_size], where
   *      batch_size corresponds to the batching dimension, and input_size
   *      is the size of the input.
   * * 1: weights.
   *      A 2-D tensor of type T, of shape [num_units, input_size], where
   *      num_units corresponds to the number of units.
   * * 2: recurrent_weights.
   *      A 2-D tensor of type T, of shape [num_units, num_units], with columns
   *      corresponding to the weights from each unit.
   * * 3: bias.
   *      A 1-D tensor of type T, of shape [num_units].
   *
   *    For FLOAT32 input tensor, bias must also be FLOAT32.
   *    For UINT8 input tensor, bias must be INT32.
   *
   * Parameters:
   * * 4: fused_activation_function.
   *      An (optional) ActivationFunctionType indicating the activation
   *      function. If NONE is specified then it results in a linear
   *      activation.
   *
   * * 5: Hidden state.
   *      A 2-D tensor of type T, of shape [batch_size, num_units].
   *
   * Outputs:
   * * 0: output.
   *      A 2-D tensor of type T, of shape [batch_size, num_units]. This is
   *      effectively the same as the current state value.
   */
  ANEURALNETWORKS_RNN = 24,

  /** Computes the softmax activation on the input tensor element-wise, per
   * batch, by normalizing the input vector so the maximum coefficient is zero.
   *
   * The output is calculated using this formula:
   *
   *     output[batch, i] =
   *         exp((input[batch, i] - max(input[batch, :])) * beta) /
   *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 2 or 4.
   *
   * Inputs:
   * * 0: A 2-D or 4-D tensor, specifying the input.
   * * 1: A FLOAT32 value, specifying the scaling factor for the exponent, beta.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_SOFTMAX = 25,
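
  // Worked example of the formula above (illustrative values): with beta = 1
  // and input[batch, :] = {0, 0}, both shifted exponents are exp(0) = 1, so
  // output[batch, :] = {1/2, 1/2} = {0.5, 0.5}.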

  /** Rearranges blocks of spatial data into depth.
   *
   * More specifically, this op outputs a copy of the input tensor where values
   * from the height and width dimensions are moved to the depth dimension. The
   * value block_size indicates the input block size and how the data is moved.
   *
   * Non-overlapping blocks of size block_size x block_size from the height and
   * width dimensions are rearranged into chunks of depth at each location.
   *
   * The depth of the output tensor is input_depth * block_size * block_size.
   * The input tensor's height and width must be divisible by block_size.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
   *
   * Supported tensor rank: 4, with "NHWC" data layout.
   *
   * Inputs:
   * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], specifying
   * the input.
   * * 1: An INT32 value, specifying the block_size. block_size must be >=1 and
   *      block_size must be a divisor of both the input height and width.
   *
   * Outputs:
   * * 0: The output 4-D tensor, of shape [batch, height/block_size,
   * width/block_size, depth*block_size*block_size].
   */
  ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
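
  // Shape example (illustrative): the inverse of the DEPTH_TO_SPACE example
  // above, an input of shape [1, 4, 4, 1] with block_size = 2 produces an
  // output of shape [1, 4/2, 4/2, 1*2*2] = [1, 2, 2, 4].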

  /**
   * SVDF op is a kind of stateful layer derived from the notion that a
   * densely connected layer that's processing a sequence of input frames can
   * be approximated by using a singular value decomposition of each of its
   * nodes. The implementation is based on:
   *
   * https://research.google.com/pubs/archive/43813.pdf
   *
   * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
   * Compressing Deep Neural Networks using a Rank-Constrained Topology.
   * INTERSPEECH, 2015.
   *
   * It processes the incoming input using a 2-stage filtering mechanism:
   * * stage 1 performs filtering on the "features" dimension, whose outputs get
   *   pushed into a memory of fixed-size memory_size.
   * * stage 2 performs filtering on the "time" dimension of the memory_size
   *   memoized outputs of stage 1.
   *
   * Specifically, for rank 1, this layer implements the operation:
   *
   *    memory = push(conv1d(inputs, weights_feature, feature_dim, "VALID"));
   *    outputs = activation(memory * weights_time + bias);
   *
   * Where:
   * * weights_feature is a weights matrix that processes the inputs (by
   *   convolving the input with every feature filter), and whose outputs get
   *   pushed, stacked in order, into the fixed-size memory (the oldest entry
   *   gets dropped);
   * * weights_time is a weights matrix that processes the memory (by a
   *   batched matrix multiplication on the num_units);
   * * bias is an optional bias vector (added to each output vector in the
   *   batch); and
   * * activation is the function passed as the fused_activation_function
   *   argument (if not NONE).
   *
   * Each rank adds a dimension to the weights matrices by means of stacking
   * the filters.
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Inputs:
   * * 0: input.
   *      A 2-D tensor of type T, of shape [batch_size, input_size], where
   *      batch_size corresponds to the batching dimension, and input_size
   *      is the size of the input.
   * * 1: weights_feature.
   *      A 2-D tensor of type T, of shape [num_units, input_size], where
   *      num_units corresponds to the number of units.
   * * 2: weights_time.
   *      A 2-D tensor of type T, of shape [num_units, memory_size], where
   *      memory_size corresponds to the fixed size of the memory.
   * * 3: bias.
   *      An optional 1-D tensor of type T, of shape [num_units].
   *
   *    For FLOAT32 input tensor, bias must also be FLOAT32.
   *    For UINT8 input tensor, bias must be INT32.
   *
   * Parameters:
   * * 4: rank.
   *      The rank of the SVD approximation.
   * * 5: fused_activation_function.
   *      An (optional) ActivationFunctionType indicating the activation
   *      function. If NONE is specified then it results in a linear activation.
   *
   * Outputs:
   * * 0: state.
   *      A 2-D tensor of type T, of shape [batch_size, (memory_size - 1) *
   *      num_units * rank].
   * * 1: output.
   *      A 2-D tensor of type T, of shape [batch_size, num_units].
   */
  ANEURALNETWORKS_SVDF = 27,

  /** Computes hyperbolic tangent of input tensor element-wise.
   *
   * The output is calculated using this formula:
   *
   *     output = tanh(input)
   *
   * Supported tensor types:
   * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
   *
   * Supported tensor rank: up to 4.
   *
   * Inputs:
   * * 0: A tensor, specifying the input.
   *
   * Outputs:
   * * 0: The output tensor of same shape as input0.
   */
  ANEURALNETWORKS_TANH = 28,
};

/**
 * Fused activation function types.
 */
enum {
  /** No fused activation function. */
  ANEURALNETWORKS_FUSED_NONE = 0,
  /** Fused ReLU activation function. */
  ANEURALNETWORKS_FUSED_RELU = 1,
  /** Fused ReLU1 activation function. */
  ANEURALNETWORKS_FUSED_RELU1 = 2,
  /** Fused ReLU6 activation function. */
  ANEURALNETWORKS_FUSED_RELU6 = 3,
};
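
// For reference, the fused activation codes above correspond to the formulas
// of the standalone activation ops documented earlier in this header:
//
//   ANEURALNETWORKS_FUSED_RELU:  output = max(0, input)
//   ANEURALNETWORKS_FUSED_RELU1: output = min(1, max(-1, input))
//   ANEURALNETWORKS_FUSED_RELU6: output = min(6, max(0, input))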

/**
 * Execution preferences.
 */
enum {
  /**
   * Prefer executing in a way that minimizes battery drain.
   * This is desirable for compilations that will be executed often.
   */
  ANEURALNETWORKS_PREFER_LOW_POWER = 0,
  /**
   * Prefer returning a single answer as fast as possible, even if this causes
   * more power consumption.
   */
  ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
  /**
   * Prefer maximizing the throughput of successive frames, for example when
   * processing successive frames coming from the camera.
   */
  ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
};

/**
 * Result codes.
 */
enum {
  ANEURALNETWORKS_NO_ERROR = 0,
  ANEURALNETWORKS_OUT_OF_MEMORY = 1,
  ANEURALNETWORKS_INCOMPLETE = 2,
  ANEURALNETWORKS_UNEXPECTED_NULL = 3,
  ANEURALNETWORKS_BAD_DATA = 4,
  ANEURALNETWORKS_OP_FAILED = 5,
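  // Note: ANEURALNETWORKS_UNMAPPABLE below shares the value 5 with
  // ANEURALNETWORKS_OP_FAILED, so the two result codes cannot be
  // distinguished by value.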
  ANEURALNETWORKS_UNMAPPABLE = 5,
  ANEURALNETWORKS_BAD_STATE = 6,
};

/**
 * ANeuralNetworksMemory is an opaque type that represents memory.
 *
 * This type is used to represent shared memory, memory mapped files,
 * and similar memories.
 *
 * By using shared memory, a program can efficiently communicate to the
 * runtime and drivers the tensors that define a model. See
 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
 * should typically create one shared memory object that contains every tensor
 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be
 * used to create shared memory from a file handle. {@link
 * ANeuralNetworksMemory_createShared} can be used to directly create shared
 * memory.
 *
 * Memory objects can also be used to specify the input and output arguments of
 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
 * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
 */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;

/**
 * ANeuralNetworksModel is an opaque type that contains a description of the
 * mathematical operations that constitute the model.
 *
 * <p>The model will be built by calling<ul>
 * <li>{@link ANeuralNetworksModel_create},</li>
 * <li>{@link ANeuralNetworksModel_addOperation},</li>
 * <li>{@link ANeuralNetworksModel_addOperand},</li>
 * </ul>
 *
 * A model is completed by calling {@link ANeuralNetworksModel_finish}.
 * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a model at a given time. It is however safe for more than one
 * thread to use the model once {@link ANeuralNetworksModel_finish} has
 * returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the model after calling {@link ANeuralNetworksModel_free}. This
 * includes any compilation or execution object created using the model.</p>
 */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;
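
// Illustrative build sequence for the lifecycle described above (a sketch
// with error handling omitted; the operand/operation details depend on the
// model being built):
//
//   ANeuralNetworksModel* model = nullptr;
//   ANeuralNetworksModel_create(&model);
//   // ... ANeuralNetworksModel_addOperand / setOperandValue /
//   //     addOperation / identifyInputsAndOutputs calls ...
//   ANeuralNetworksModel_finish(model);
//   // ... compile and execute ...
//   ANeuralNetworksModel_free(model);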

/**
 * ANeuralNetworksCompilation is an opaque type that can be used to compile
 * a machine learning model.
 *
 * <p>To use:<ul>
 *    <li>Create a new compilation instance by calling the
 *        {@link ANeuralNetworksCompilation_create} function.</li>
 *    <li>Complete the compilation with
 *        {@link ANeuralNetworksCompilation_finish}.</li>
 *    <li>Use the compilation as many times as needed with
 *        {@link ANeuralNetworksExecution_create}.</li>
 *    <li>Destroy the compilation with
 *        {@link ANeuralNetworksCompilation_free} once all executions using
 *        the compilation have completed.</li></ul></p>
 *
 * <p>A compilation cannot be modified once
 * {@link ANeuralNetworksCompilation_finish} has been called on it.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies a compilation at a given time. It is however safe for more than one
 * thread to use a compilation once
 * {@link ANeuralNetworksCompilation_finish} has returned.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the compilation after calling {@link
 * ANeuralNetworksCompilation_free}. This includes any execution object created
 * using the compilation.</p>
 */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;

/**
 * ANeuralNetworksExecution is an opaque type that can be used to apply a
 * machine learning model to a set of inputs.
 *
 * <p>To use:<ul>
 *    <li>Create a new execution instance by calling the
 *        {@link ANeuralNetworksExecution_create} function.</li>
 *    <li>Associate data to the model inputs with
 *        {@link ANeuralNetworksExecution_setInput} or
 *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
 *    <li>Associate output buffers to the model outputs with
 *        {@link ANeuralNetworksExecution_setOutput} or
 *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
 *    <li>Apply the model with
 *        {@link ANeuralNetworksExecution_startCompute}.</li>
 *    <li>Wait for the execution to complete with
 *        {@link ANeuralNetworksEvent_wait}.</li>
 *    <li>Destroy the execution with
 *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
 *
 * <p>An execution cannot be modified once
 * {@link ANeuralNetworksExecution_startCompute} has been called on it.</p>
 *
 * <p>An execution can be applied to a model with
 * {@link ANeuralNetworksExecution_startCompute} only once. Create new
 * executions to do new evaluations of the model.</p>
 *
 * <p>It is the application's responsibility to make sure that only one thread
 * modifies an execution at a given time. It is however safe for more than one
 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
 *
 * <p>It is also the application's responsibility to ensure that there are no
 * other uses of the execution after calling
 * {@link ANeuralNetworksExecution_free}.</p>
 */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;
   1236 
   1237 /**
   1238  * ANeuralNetworksOperandType describes the type of an operand.
   1239  * This structure is used to describe both scalars and tensors.
   1240  */
   1241 typedef struct ANeuralNetworksOperandType {
   1242   /** The data type, e.g ANEURALNETWORKS_INT8. */
   1243   int32_t type;
   1244   /** The number of dimensions. It should be 0 for scalars. */
   1245   uint32_t dimensionCount;
   1246   /** The dimensions of the tensor. It should be nullptr for scalars. */
   1247   const uint32_t* dimensions;
   1248   /** These two fields are only used for quantized tensors.
   1249    * They should be zero for scalars and non-fixed point tensors.
   * The dequantized value of each entry is (value - zeroPoint) * scale.
   1251    */
   1252   float scale;
   1253   int32_t zeroPoint;
   1254 } ANeuralNetworksOperandType;
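
// Example (an illustrative sketch, not part of the NNAPI surface): filling in
// ANeuralNetworksOperandType for a float32 tensor and an int32 scalar. The
// shape below is arbitrary. Fields are, in order: type, dimensionCount,
// dimensions, scale, zeroPoint.
//
//   const uint32_t dims[2] = {2, 3};  // a 2x3 tensor
//   ANeuralNetworksOperandType tensor_type = {
//       ANEURALNETWORKS_TENSOR_FLOAT32, 2, dims, 0.0f, 0};
//   ANeuralNetworksOperandType scalar_type = {
//       ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};  // scalars: no dims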
   1255 
   1256 /**
   1257  * ANeuralNetworksEvent is an opaque type that represents an event
   1258  * that will be signaled once an execution completes.
   1259  */
   1260 typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
   1261 
   1262 typedef int32_t ANeuralNetworksOperationType;
   1263 
   1264 // nn api function types
   1265 
   1266 typedef int (*ANeuralNetworksMemory_createFromFd_fn)(
   1267     size_t size, int protect, int fd, size_t offset,
   1268     ANeuralNetworksMemory** memory);
   1269 
   1270 typedef void (*ANeuralNetworksMemory_free_fn)(ANeuralNetworksMemory* memory);
   1271 
   1272 typedef int (*ANeuralNetworksModel_create_fn)(ANeuralNetworksModel** model);
   1273 
   1274 typedef int (*ANeuralNetworksModel_finish_fn)(ANeuralNetworksModel* model);
   1275 
   1276 typedef void (*ANeuralNetworksModel_free_fn)(ANeuralNetworksModel* model);
   1277 
   1278 typedef int (*ANeuralNetworksCompilation_create_fn)(
   1279     ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation);
   1280 
   1281 typedef void (*ANeuralNetworksCompilation_free_fn)(
   1282     ANeuralNetworksCompilation* compilation);
   1283 
   1284 typedef int (*ANeuralNetworksCompilation_setPreference_fn)(
   1285     ANeuralNetworksCompilation* compilation, int32_t preference);
   1286 
   1287 typedef int (*ANeuralNetworksCompilation_finish_fn)(
   1288     ANeuralNetworksCompilation* compilation);
   1289 
   1290 typedef int (*ANeuralNetworksModel_addOperand_fn)(
   1291     ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type);
   1292 
   1293 typedef int (*ANeuralNetworksModel_setOperandValue_fn)(
   1294     ANeuralNetworksModel* model, int32_t index, const void* buffer,
   1295     size_t length);
   1296 
   1297 typedef int (*ANeuralNetworksModel_setOperandValueFromMemory_fn)(
   1298     ANeuralNetworksModel* model, int32_t index,
   1299     const ANeuralNetworksMemory* memory, size_t offset, size_t length);
   1300 
   1301 typedef int (*ANeuralNetworksModel_addOperation_fn)(
   1302     ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
   1303     uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
   1304     const uint32_t* outputs);
   1305 
   1306 typedef int (*ANeuralNetworksModel_identifyInputsAndOutputs_fn)(
   1307     ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
   1308     uint32_t outputCount, const uint32_t* outputs);
   1309 
   1310 typedef int (*ANeuralNetworksExecution_create_fn)(
   1311     ANeuralNetworksCompilation* compilation,
   1312     ANeuralNetworksExecution** execution);
   1313 
   1314 typedef void (*ANeuralNetworksExecution_free_fn)(
   1315     ANeuralNetworksExecution* execution);
   1316 
   1317 typedef int (*ANeuralNetworksExecution_setInput_fn)(
   1318     ANeuralNetworksExecution* execution, int32_t index,
   1319     const ANeuralNetworksOperandType* type, const void* buffer, size_t length);
   1320 
   1321 typedef int (*ANeuralNetworksExecution_setInputFromMemory_fn)(
   1322     ANeuralNetworksExecution* execution, int32_t index,
   1323     const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
   1324     size_t offset, size_t length);
   1325 
   1326 typedef int (*ANeuralNetworksExecution_setOutput_fn)(
   1327     ANeuralNetworksExecution* execution, int32_t index,
   1328     const ANeuralNetworksOperandType* type, void* buffer, size_t length);
   1329 
   1330 typedef int (*ANeuralNetworksExecution_setOutputFromMemory_fn)(
   1331     ANeuralNetworksExecution* execution, int32_t index,
   1332     const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
   1333     size_t offset, size_t length);
   1334 
   1335 typedef int (*ANeuralNetworksExecution_startCompute_fn)(
   1336     ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event);
   1337 
   1338 typedef int (*ANeuralNetworksEvent_wait_fn)(ANeuralNetworksEvent* event);
   1339 
   1340 typedef void (*ANeuralNetworksEvent_free_fn)(ANeuralNetworksEvent* event);
   1341 
   1342 /**
   1343  * Creates a shared memory object from a file descriptor.
   1344  *
   1345  * The shared memory is backed by a file descriptor via mmap.
   1346  * See {@link ANeuralNetworksMemory} for a description on how to use
   1347  * this shared memory.
   1348  *
   1349  * @param size The requested size in bytes.
   1350  *             Must not be larger than the file size.
 * @param protect The desired memory protection for the mapping.
   1352  *             It is either PROT_NONE or the bitwise OR of one or
   1353  *             more of the following flags: PROT_READ, PROT_WRITE.
   1354  * @param fd The requested file descriptor.
   1355  *           The file descriptor has to be mmap-able. The file
   1356  *           descriptor will be duplicated.
 * @param offset The offset, in bytes, into the file of the area to map.
   1358  *               The offset has to be aligned to a page size.
   1359  * @param memory The memory object to be created.
   1360  *               Set to NULL if unsuccessful.
   1361  *
   1362  * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
   1363  */
   1364 inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd,
   1365                                               size_t offset,
   1366                                               ANeuralNetworksMemory** memory) {
   1367   LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
   1368   EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
   1369 }
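
// Example (a minimal sketch): mapping a read-only file of constant data into
// an ANeuralNetworksMemory. The file name and size are hypothetical.
//
//   #include <fcntl.h>     // open, O_RDONLY
//   #include <sys/mman.h>  // PROT_READ
//
//   int fd = open("weights.bin", O_RDONLY);  // hypothetical file
//   ANeuralNetworksMemory* memory = nullptr;
//   int status = ANeuralNetworksMemory_createFromFd(
//       /*size=*/65536, PROT_READ, fd, /*offset=*/0, &memory);
//   // status is ANEURALNETWORKS_NO_ERROR (0) on success. The fd is
//   // duplicated, so the caller may close(fd) afterwards.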
   1370 
   1371 /**
   1372  * Delete a memory object.
   1373  *
 * Destroys the object used by the runtime to keep track of the memory.
   1375  * This will free the underlying actual memory if no other code has open
   1376  * handles to this memory.
   1377  *
   1378  * @param memory The memory object to be freed.
   1379  */
   1380 inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) {
   1381   LOAD_FUNCTION(ANeuralNetworksMemory_free);
   1382   EXECUTE_FUNCTION(memory);
   1383 }
   1384 
   1385 /**
   1386  * Create an empty {@link ANeuralNetworksModel}.
   1387  *
   1388  * <p>This only creates the object. Computation is performed once
   1389  * {@link ANeuralNetworksExecution_startCompute} is invoked.
   1390  *
   1391  * The model should be constructed with calls to
   1392  * {@link ANeuralNetworksModel_addOperation} and
 * {@link ANeuralNetworksModel_addOperand}.
   1394  *
   1395  * <p>{@link ANeuralNetworksModel_finish} should be called once the model
   1396  * has been fully constructed.</p>
   1397  *
   1398  * <p>{@link ANeuralNetworksModel_free} should be called once the model
   1399  * is no longer needed.</p>
   1400  *
   1401  * @param model The {@link ANeuralNetworksModel} to be created.
   1402  *              Set to NULL if unsuccessful.
   1403  *
   1404  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1405  */
   1406 inline int ANeuralNetworksModel_create(ANeuralNetworksModel** model) {
   1407   LOAD_FUNCTION(ANeuralNetworksModel_create);
   1408   EXECUTE_FUNCTION_RETURN(model);
   1409 }
   1410 
   1411 /**
   1412  * Destroy a model.
   1413  *
   1414  * The model need not have been finished by a call to
   1415  * {@link ANeuralNetworksModel_finish}.
   1416  *
   1417  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
   1418  *
   1419  * @param model The model to be destroyed. Passing NULL is acceptable and
   1420  *              results in no operation.
   1421  */
   1422 inline void ANeuralNetworksModel_free(ANeuralNetworksModel* model) {
   1423   LOAD_FUNCTION(ANeuralNetworksModel_free);
   1424   EXECUTE_FUNCTION(model);
   1425 }
   1426 
   1427 /**
 * Indicate that we have finished modifying a model. Required before
 * calling {@link ANeuralNetworksCompilation_create}.
 *
 * An application is responsible for making sure that no other thread uses
   1432  * the model at the same time.
   1433  *
   1434  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
   1435  *
   1436  * @param model The model to be finished.
   1437  *
   1438  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1439  */
   1440 inline int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) {
   1441   LOAD_FUNCTION(ANeuralNetworksModel_finish);
   1442   EXECUTE_FUNCTION_RETURN(model);
   1443 }
   1444 
   1445 /**
   1446  * Add an operand to a model.
   1447  *
   1448  * The order in which the operands are added is important. The first one added
   1449  * to a model will have the index value 0, the second 1, etc. These indexes are
   1450  * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
   1451  * {@link ANeuralNetworksExecution_setInput},
   1452  * {@link ANeuralNetworksExecution_setInputFromMemory},
   1453  * {@link ANeuralNetworksExecution_setOutput},
   1454  * {@link ANeuralNetworksExecution_setOutputFromMemory} and
 * {@link ANeuralNetworksModel_setOperandValue}.
   1456  *
   1457  * To build a model that can accommodate inputs of various sizes, as you may
   1458  * want to do for a CNN, set the size of the dimensions that will vary at run
   1459  * time to 0. If you do so, provide the full dimensions when calling
   1460  * {@link ANeuralNetworksExecution_setInput} or {@link
   1461  * ANeuralNetworksExecution_setInputFromMemory}.
   1462  *
   1463  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
   1464  * been called will return an error.
   1465  *
   1466  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
   1467  *
   1468  * @param model The model to be modified.
   1469  * @param type The {@link ANeuralNetworksOperandType} that describes the shape
   1470  * of the operand.
   1471  *
   1472  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1473  */
   1474 inline int ANeuralNetworksModel_addOperand(
   1475     ANeuralNetworksModel* model, const ANeuralNetworksOperandType* type) {
   1476   LOAD_FUNCTION(ANeuralNetworksModel_addOperand);
   1477   EXECUTE_FUNCTION_RETURN(model, type);
   1478 }
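
// Example (an illustrative sketch, assuming a model created with
// ANeuralNetworksModel_create): operand indexes are assigned in call order,
// starting at 0.
//
//   const uint32_t dims[1] = {4};
//   ANeuralNetworksOperandType tensor_type = {
//       ANEURALNETWORKS_TENSOR_FLOAT32, 1, dims, 0.0f, 0};
//   ANeuralNetworksOperandType scalar_type = {
//       ANEURALNETWORKS_INT32, 0, nullptr, 0.0f, 0};
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // index 0
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // index 1
//   ANeuralNetworksModel_addOperand(model, &scalar_type);  // index 2
//   ANeuralNetworksModel_addOperand(model, &tensor_type);  // index 3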
   1479 
   1480 /**
   1481  * Sets an operand to a constant value.
   1482  *
   1483  * For scalar values, the content of buffer is copied into the model.
   1484  *
   1485  * For tensor values, a pointer to the buffer is stored within the model.
   1486  * The application is responsible for not changing the content of this region
   1487  * until all executions using this model have completed. As the data may
   1488  * be copied during processing, modifying the data after this call yields
   1489  * undefined results.
   1490  *
   1491  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
   1492  * been called will return an error.
   1493  *
   1494  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
   1495  *
   1496  * @param model The model to be modified.
   1497  * @param index The index of the model operand we're setting.
   1498  * @param buffer A pointer to the data to use.
   1499  * @param length The size in bytes of the data value.
   1500  *
   1501  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1502  */
   1503 inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model,
   1504                                                 int32_t index,
   1505                                                 const void* buffer,
   1506                                                 size_t length) {
   1507   LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue);
   1508   EXECUTE_FUNCTION_RETURN(model, index, buffer, length);
   1509 }
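
// Example (continuing the sketch above): setting operand 2 to a constant
// int32 scalar. For a scalar, the value is copied into the model, so the
// local variable need not outlive this call.
//
//   const int32_t fuse_code = 0;  // e.g. a "no fused activation" code
//   ANeuralNetworksModel_setOperandValue(model, /*index=*/2, &fuse_code,
//                                        sizeof(fuse_code));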
   1510 
   1511 /**
   1512  * Sets an operand to a value stored in a memory object.
   1513  *
   1514  * The content of the memory is not copied. A reference to that memory is stored
   1515  * inside the model. The application is responsible for not changing the content
   1516  * of the memory region until all executions using this model have completed.
   1517  * As the data may be copied during processing, modifying the data after this
   1518  * call yields undefined results.
   1519  *
   1520  * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
   1521  * been called will return an error.
   1522  *
   1523  * See {@link ANeuralNetworksModel} for information on multithreaded usage.
   1524  *
   1525  * @param model The model to be modified.
   1526  * @param index The index of the model operand we're setting.
   1528  * @param memory The memory containing the data.
   1529  * @param offset This specifies the location of the data within the memory.
   1530  *               The offset is in bytes from the start of memory.
   1531  * @param length The size in bytes of the data value.
   1532  *
   1533  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1534  */
   1535 inline int ANeuralNetworksModel_setOperandValueFromMemory(
   1536     ANeuralNetworksModel* model, int32_t index,
   1537     const ANeuralNetworksMemory* memory, size_t offset, size_t length) {
   1538   LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory);
   1539   EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length);
   1540 }
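
// Example (an illustrative sketch, reusing the hypothetical memory object
// created in the ANeuralNetworksMemory_createFromFd example): backing the
// constant tensor at operand index 1 with bytes from that memory; only a
// reference is stored.
//
//   ANeuralNetworksModel_setOperandValueFromMemory(
//       model, /*index=*/1, memory, /*offset=*/0,
//       /*length=*/4 * sizeof(float));  // 4 floats for the {4} tensor above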
   1541 
   1542 /**
   1543  * Add an operation to a model.
   1544  *
 * The operands specified by inputs and outputs must have been
 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
 *
 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
 * been called will return an error.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * @param model The model to be modified.
 * @param type The type of the operation.
 * @param inputCount The number of entries in the inputs array.
 * @param inputs An array of indexes identifying the input operands.
 * @param outputCount The number of entries in the outputs array.
 * @param outputs An array of indexes identifying the output operands.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
   1561  */
   1562 inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
   1563                                              ANeuralNetworksOperationType type,
   1564                                              uint32_t inputCount,
   1565                                              const uint32_t* inputs,
   1566                                              uint32_t outputCount,
   1567                                              const uint32_t* outputs) {
   1568   LOAD_FUNCTION(ANeuralNetworksModel_addOperation);
   1569   EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount,
   1570                           outputs);
   1571 }
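
// Example (an illustrative sketch, assuming the ANEURALNETWORKS_ADD operation
// code defined earlier in this header, which takes two input tensors plus a
// fused-activation scalar and produces one output tensor):
//
//   const uint32_t add_inputs[3] = {0, 1, 2};   // operand indexes from above
//   const uint32_t add_outputs[1] = {3};
//   ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD,
//                                     /*inputCount=*/3, add_inputs,
//                                     /*outputCount=*/1, add_outputs);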
   1572 
   1573 /**
   1574  * Specifies which operands will be the model's inputs and outputs.
   1575  *
   1576  * An operand cannot be used for both input and output. Doing so will
   1577  * return an error.
   1578  *
 * The operands specified by inputs and outputs must have been
 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
 *
 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
 * been called will return an error.
 *
 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
 *
 * @param model The model to be modified.
 * @param inputCount The number of entries in the inputs array.
 * @param inputs An array of indexes identifying the input operands.
 * @param outputCount The number of entries in the outputs array.
 * @param outputs An array of indexes identifying the output operands.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 */
   1594 inline int ANeuralNetworksModel_identifyInputsAndOutputs(
   1595     ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs,
   1596     uint32_t outputCount, const uint32_t* outputs) {
   1597   LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs);
   1598   EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs);
   1599 }
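
// Example (an illustrative sketch, continuing the model above): declare
// operand 0 as the model's only input and operand 3 as its only output, then
// finish the model so it can be compiled.
//
//   const uint32_t model_inputs[1] = {0};
//   const uint32_t model_outputs[1] = {3};
//   ANeuralNetworksModel_identifyInputsAndOutputs(
//       model, /*inputCount=*/1, model_inputs,
//       /*outputCount=*/1, model_outputs);
//   ANeuralNetworksModel_finish(model);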
   1600 
   1601 /**
   1602  * Create a {@link ANeuralNetworksCompilation} to compile the given model.
 * This only creates the object. Compilation is only performed once
 * {@link ANeuralNetworksCompilation_finish} is invoked.
   1605  *
   1606  * <p>The provided model must outlive the compilation.</p>
   1607  *
   1608  * The model must already have been finished by a call to
   1609  * {@link ANeuralNetworksModel_finish}.
   1610  *
   1611  * See {@link ANeuralNetworksCompilation} for information on multithreaded
   1612  * usage.
   1613  *
   1614  * @param model The {@link ANeuralNetworksModel} to be compiled.
   1615  * @param compilation The newly created object or NULL if unsuccessful.
   1616  *
   1617  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
   1618  *         if the model is invalid.
   1619  */
   1620 inline int ANeuralNetworksCompilation_create(
   1621     ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation) {
   1622   LOAD_FUNCTION(ANeuralNetworksCompilation_create);
   1623   EXECUTE_FUNCTION_RETURN(model, compilation);
   1624 }
   1625 
   1626 /**
   1627  * Destroy a compilation.
   1628  *
 * <p>The compilation need not have been finished by a call to
 * {@link ANeuralNetworksCompilation_finish}.</p>
   1634  *
   1635  * See {@link ANeuralNetworksCompilation} for information on multithreaded
   1636  * usage.
   1637  *
   1638  * @param compilation The compilation to be destroyed. Passing NULL is
   1639  * acceptable and results in no operation.
   1640  */
   1641 inline void ANeuralNetworksCompilation_free(
   1642     ANeuralNetworksCompilation* compilation) {
   1643   LOAD_FUNCTION(ANeuralNetworksCompilation_free);
   1644   EXECUTE_FUNCTION(compilation);
   1645 }
   1646 
   1647 /**
   1648  * Sets the execution preference.
   1649  *
   1650  * <p>Provides guidance to the runtime when trade-offs are possible.</p>
   1651  *
   1652  * See {@link ANeuralNetworksCompilation} for information on multithreaded
   1653  * usage.
   1654  *
   1655  * @param compilation The compilation to be modified.
   1656  * @param preference Either {@link PREFER_LOW_POWER},
   1657  *                  {@link PREFER_SINGLE_FAST_ANSWER}, or
   1658  *                  {@link PREFER_SUSTAINED_SPEED}.
   1659  *
   1660  * @return ANEURALNETWORKS_NO_ERROR if successful.
   1661  */
   1662 inline int ANeuralNetworksCompilation_setPreference(
   1663     ANeuralNetworksCompilation* compilation, int32_t preference) {
   1664   LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference);
   1665   EXECUTE_FUNCTION_RETURN(compilation, preference);
   1666 }
   1667 
   1668 /**
 * Indicate that we have finished modifying a compilation. Required before
 * calling {@link ANeuralNetworksExecution_create}.
 *
 * An application is responsible for making sure that no other thread uses
 * the compilation at the same time.
 *
 * See {@link ANeuralNetworksCompilation} for information on multithreaded
 * usage.
 *
 * @param compilation The compilation to be finished.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
   1678  */
   1679 inline int ANeuralNetworksCompilation_finish(
   1680     ANeuralNetworksCompilation* compilation) {
   1681   LOAD_FUNCTION(ANeuralNetworksCompilation_finish);
   1682   EXECUTE_FUNCTION_RETURN(compilation);
   1683 }
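
// Example (an illustrative sketch): the typical compilation sequence, from a
// finished model to a compilation ready for executions. Error checks are
// omitted for brevity; each call returns ANEURALNETWORKS_NO_ERROR (0) on
// success. The preference value 2 is assumed here to be the "prefer
// sustained speed" code; see the preference constants defined earlier in
// this header.
//
//   ANeuralNetworksCompilation* compilation = nullptr;
//   ANeuralNetworksCompilation_create(model, &compilation);
//   ANeuralNetworksCompilation_setPreference(compilation, /*preference=*/2);
//   ANeuralNetworksCompilation_finish(compilation);
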
   1684 /**
   1685  * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
   1686  * This only creates the object. Computation is only performed once
   1687  * {@link ANeuralNetworksExecution_startCompute} is invoked.
   1688  *
   1689  * <p>The provided compilation must outlive the execution.</p>
   1690  *
   1691  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1692  *
   1693  * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
   1694  * @param execution The newly created object or NULL if unsuccessful.
   1695  *
   1696  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
   1697  *         if the compilation is invalid.
   1698  */
   1699 inline int ANeuralNetworksExecution_create(
   1700     ANeuralNetworksCompilation* compilation,
   1701     ANeuralNetworksExecution** execution) {
   1702   LOAD_FUNCTION(ANeuralNetworksExecution_create);
   1703   EXECUTE_FUNCTION_RETURN(compilation, execution);
   1704 }
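
// Example (an illustrative sketch): create an execution from the finished
// compilation. A fresh execution is needed for each evaluation, since an
// execution can only be computed once.
//
//   ANeuralNetworksExecution* execution = nullptr;
//   ANeuralNetworksExecution_create(compilation, &execution);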
   1705 
   1706 /**
   1707  * Destroy an execution.
   1708  *
   1709  * <p>If called on an execution for which
   1710  * {@link ANeuralNetworksExecution_startCompute} has been called, the
   1711  * function will return immediately but will mark the execution to be deleted
 * once the computation completes. {@link ANeuralNetworksEvent_wait} will then
 * return ANEURALNETWORKS_ERROR_DELETED.
   1714  *
   1715  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1716  *
   1717  * @param execution The execution to be destroyed. Passing NULL is acceptable
   1718  * and results in no operation.
   1719  */
   1720 inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) {
   1721   LOAD_FUNCTION(ANeuralNetworksExecution_free);
   1722   EXECUTE_FUNCTION(execution);
   1723 }
   1724 
   1725 /**
   1726  * Associate a user buffer with an input of the model of the
   1727  * {@link ANeuralNetworksExecution}.
   1728  *
   1729  * <p>The provided buffer must outlive the execution.</p>
   1730  *
   1731  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1732  *
   1733  * @param execution The execution to be modified.
   1734  * @param index The index of the input argument we are setting. It is
   1735  *              an index into the lists passed to
   1736  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
   1737  *              the index associated with {@link
   1738  * ANeuralNetworksModel_addOperand}.
   1739  * @param type The type of the operand. This should be used to specify the
   1740  *             dimensions that were set to 0 when the operand was added to the
   1741  *             model. All other properties of the type must be the same as
   1742  *             specified in the model. If the type is the same as specified
   1743  *             when the model was built, NULL can be passed.
   1744  * @param buffer The buffer containing the data.
   1745  * @param length The length in bytes of the buffer.
   1746  *
   1747  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
 * the index is not recognized or the buffer is too small for the input.
   1749  */
   1750 inline int ANeuralNetworksExecution_setInput(
   1751     ANeuralNetworksExecution* execution, int32_t index,
   1752     const ANeuralNetworksOperandType* type, const void* buffer, size_t length) {
   1753   LOAD_FUNCTION(ANeuralNetworksExecution_setInput);
   1754   EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
   1755 }
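
// Example (an illustrative sketch): binding a caller-owned input buffer to
// model input 0. Passing NULL for the type reuses the operand type recorded
// when the model was built; the buffer must stay alive until the execution
// completes.
//
//   float input0[4] = {1.f, 2.f, 3.f, 4.f};
//   ANeuralNetworksExecution_setInput(execution, /*index=*/0,
//                                     /*type=*/nullptr, input0,
//                                     sizeof(input0));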
   1756 
   1757 /**
   1758  * Associate part of a memory object with an input of the model of the
   1759  * {@link ANeuralNetworksExecution}.
   1760  *
   1761  * <p>The provided memory must outlive the execution.</p>
   1762  *
   1763  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1764  *
   1765  * @param execution The execution to be modified.
   1766  * @param index The index of the input argument we are setting. It is
   1767  *              an index into the lists passed to
   1768  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
   1769  *              the index associated with {@link
   1770  * ANeuralNetworksModel_addOperand}.
   1771  * @param type The type of the operand. This can be used to specify the
   1772  *             dimensions that were set to 0 when the operand was added to the
   1773  *             model. All other values must be the same as specified in the
   1774  *             model. If the type is the same as specified when the model
   1775  *             was built, NULL can be passed.
   1776  * @param memory The memory containing the data.
   1777  * @param offset This specifies the location of the data within the memory.
   1778  *               The offset is in bytes from the start of memory.
   1779  * @param length The size in bytes of the data value.
   1780  *
   1781  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
 * the index is not recognized or the buffer is too small for the input.
   1783  */
   1784 inline int ANeuralNetworksExecution_setInputFromMemory(
   1785     ANeuralNetworksExecution* execution, int32_t index,
   1786     const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
   1787     size_t offset, size_t length) {
   1788   LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory);
   1789   EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
   1790 }
   1791 
   1792 /**
   1793  * Associate a user buffer with an output of the model of the
   1794  * {@link ANeuralNetworksExecution}.
   1795  *
   1796  * <p>The provided buffer must outlive the execution.</p>
   1797  *
   1798  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1799  *
   1800  * @param execution The execution to be modified.
   1801  * @param index The index of the output argument we are setting. It is
   1802  *              an index into the lists passed to
   1803  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
   1804  *              the index associated with {@link
   1805  * ANeuralNetworksModel_addOperand}.
   1806  * @param type The type of the operand. This can be used to specify the
   1807  *             dimensions that were set to 0 when the operand was added to the
   1808  *             model. All other values must be the same as specified in the
   1809  *             model. If the type is the same as specified when the model
   1810  *             was built, NULL can be passed.
   1811  * @param buffer The buffer where the data is to be written.
   1812  * @param length The length in bytes of the buffer.
   1813  *
   1814  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
 * the index is not recognized or the buffer is too small for the output.
   1816  */
   1817 inline int ANeuralNetworksExecution_setOutput(
   1818     ANeuralNetworksExecution* execution, int32_t index,
   1819     const ANeuralNetworksOperandType* type, void* buffer, size_t length) {
   1820   LOAD_FUNCTION(ANeuralNetworksExecution_setOutput);
   1821   EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
   1822 }
   1823 
   1824 /**
   1825  * Associate part of a memory object with an output of the model of the
   1826  * {@link ANeuralNetworksExecution}.
   1827  *
   1828  * <p>The provided memory must outlive the execution.</p>
   1829  *
   1830  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1831  *
   1832  * @param execution The execution to be modified.
   1833  * @param index The index of the output argument we are setting. It is
   1834  *              an index into the lists passed to
   1835  *              {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
   1836  *              the index associated with {@link
   1837  * ANeuralNetworksModel_addOperand}.
   1838  * @param type The type of the operand. This can be used to specify the
   1839  *             dimensions that were set to 0 when the operand was added to the
   1840  *             model. All other values must be the same as specified in the
   1841  *             model. If the type is the same as specified when the model
   1842  *             was built, NULL can be passed.
   1843  * @param memory The memory where the data is to be stored.
   1844  * @param offset This specifies the location of the data within the memory.
   1845  *               The offset is in bytes from the start of memory.
   1846  * @param length The length in bytes of the data value.
   1847  *
   1848  * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
 * the index is not recognized or the buffer is too small for the output.
   1850  */
   1851 inline int ANeuralNetworksExecution_setOutputFromMemory(
   1852     ANeuralNetworksExecution* execution, int32_t index,
   1853     const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory,
   1854     size_t offset, size_t length) {
   1855   LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory);
   1856   EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
   1857 }
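
// Example (an illustrative sketch): writing model output 0 directly into a
// region of a memory object instead of a user buffer. output_memory is a
// hypothetical ANeuralNetworksMemory created with a writable mapping
// (e.g. PROT_READ | PROT_WRITE).
//
//   ANeuralNetworksExecution_setOutputFromMemory(
//       execution, /*index=*/0, /*type=*/nullptr, output_memory,
//       /*offset=*/0, /*length=*/4 * sizeof(float));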
   1858 
   1859 /**
   1860  * Schedule evaluation of the execution.
   1861  *
 * <p>Schedules evaluation of the execution. Once the model has been
 * applied and the outputs are ready to be consumed, the returned event will be
 * signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that signal.
 * </p>
   1866  *
   1867  * Multiple executions can be scheduled and evaluated concurrently, and
   1868  * compilations can be performed concurrently with executions. The runtime makes
   1869  * no guarantee on the ordering of the completion of compilations and
   1870  * executions. If it's important to the application, the application should
 * enforce the ordering by using {@link ANeuralNetworksEvent_wait}.
 *
 * {@link ANeuralNetworksEvent_wait} must be called to reclaim the resources
 * used by the execution.
   1876  *
   1877  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1878  *
 * @param execution The execution to be scheduled and executed.
 * @param event The event that will be signaled on completion.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
   1882  */
   1883 inline int ANeuralNetworksExecution_startCompute(
   1884     ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
   1885   LOAD_FUNCTION(ANeuralNetworksExecution_startCompute);
   1886   EXECUTE_FUNCTION_RETURN(execution, event);
   1887 }
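
// Example (an illustrative sketch): schedule the evaluation, block until the
// outputs are ready, then release the event and the execution.
//
//   ANeuralNetworksEvent* event = nullptr;
//   ANeuralNetworksExecution_startCompute(execution, &event);
//   ANeuralNetworksEvent_wait(event);  // outputs are valid once this returns
//   ANeuralNetworksEvent_free(event);
//   ANeuralNetworksExecution_free(execution);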
   1888 
   1889 /**
   1890  * Waits until the execution completes.
   1891  *
   1892  * More than one thread can wait on an event. When the execution completes,
   1893  * all threads will be released.
   1894  *
   1895  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1896  *
   1897  * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
   1898  */
   1899 inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) {
   1900   LOAD_FUNCTION(ANeuralNetworksEvent_wait);
   1901   EXECUTE_FUNCTION_RETURN(event);
   1902 }
   1903 
   1904 /**
   1905  * Destroys the event.
   1906  *
   1907  * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
   1908  */
   1909 inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) {
   1910   LOAD_FUNCTION(ANeuralNetworksEvent_free);
   1911   EXECUTE_FUNCTION(event);
   1912 }
   1913 
   1915 
   1916 #endif  // NN_API_SHIM_H0
   1917