// Home | History | Annotate | Download | only in schema
      1 // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //     http://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
     15 namespace tflite;
     16 
// The type of data stored in a tensor.
// Backed by a single byte on the wire; new types must be appended with new
// values to stay binary-compatible.
enum TensorType : byte {
  FLOAT32 = 0,
  FLOAT16 = 1,
  INT32 = 2,
  UINT8 = 3,
  INT64 = 4,
}
     25 
// Parameters for converting a quantized tensor back to float. Given a
// quantized value q, the corresponding float value f should be:
//   f = scale * (q - zero_point)
table QuantizationParameters {
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];       // Multiplier applied during dequantization.
  zero_point:[long];   // Quantized value that represents float 0.0.
}
     35 
// A tensor: its shape, element type, raw data, and optional quantization.
table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, number of channels, height, width] (That's
  // Tensorflow's NCHW).
  shape:[int];
  type:TensorType;
  // The data_buffer is an opaque container, with the assumption that the
  // target device is little-endian. In addition, all builtin operators assume
  // the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k] (row-major: the last
  // dimension is contiguous).
  data_buffer:[ubyte];
  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.
}
     50 
// A list of builtin operators. Builtin operators are slightly faster than
// custom ones, but not by much. Moreover, while custom operators accept an
// opaque object containing configuration parameters, builtins have a
// predetermined set of acceptable options.
// Values are part of the serialized format; only append new entries.
enum BuiltinOperator : byte {
  CUSTOM = 0,
  CONVOLUTION = 1,
  DEPTHWISE_CONVOLUTION = 2,
  CONCAT_EMBEDDINGS = 3,
  LSH_PROJECTION = 4,
  TANH = 5,
  RELU = 6,
  AVERAGE_POOL = 7,
  MAX_POOL = 8,
  L2_POOL = 9,
  SIGMOID = 10,
  SVDF = 11,
  BasicRNN = 12,
  RELU6 = 13,
  EMBEDDING_LOOKUP = 14,
  FULLY_CONNECTED = 15,
  HASHTABLE_LOOKUP = 16,
  SOFTMAX = 17,
  CONCATENATION = 18,
  LSTM = 19,
  ADD = 20,
  L2NORM = 21,
  LOCAL_RESPONSE_NORM = 22,
  RESIZE_BILINEAR = 23,
}
     81 
// Options for the builtin operators. A union: each Operator stores at most
// one of these tables, chosen to match its builtin_code.
union BuiltinOptions {
  ConvolutionOptions,
  DepthwiseConvolutionOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,
  PoolOptions,
  SVDFOptions,
  BasicRNNOptions,
  FullyConnectedOptions,
  SoftmaxOptions,
  ConcatenationOptions,
  AddOptions,
  L2NormOptions,
  LocalResponseNormOptions,
  LSTMOptions,
  ResizeBilinearOptions,
}
    100 
// Padding scheme used by windowed ops (convolution/pooling), following
// TensorFlow's SAME/VALID convention.
enum Padding : byte { SAME, VALID }
    102 
// Activation function that may be fused into an op's output (see the
// fused_activation_function fields below). NONE means no activation.
enum ActivationFunctionType : byte {
  NONE = 0,
  RELU = 1,
  RELU1 = 2,
  RELU6 = 3,
  TANH = 4,
  SIGN_BIT = 5,
}
    111 
// Options for BuiltinOperator.CONVOLUTION.
table ConvolutionOptions {
  padding:Padding;
  stride_w:int;  // Stride along the width dimension.
  stride_h:int;  // Stride along the height dimension.
  fused_activation_function:ActivationFunctionType;
}
    118 
// Options shared by the pooling builtins (AVERAGE_POOL, MAX_POOL, L2_POOL).
table PoolOptions {
  padding:Padding;
  stride_w:int;        // Stride along the width dimension.
  stride_h:int;        // Stride along the height dimension.
  filter_width:int;    // Pooling window width.
  filter_height:int;   // Pooling window height.
  fused_activation_function:ActivationFunctionType;
}
    127 
// Options for BuiltinOperator.DEPTHWISE_CONVOLUTION.
table DepthwiseConvolutionOptions {
  padding:Padding;
  stride_w:int;  // Stride along the width dimension.
  stride_h:int;  // Stride along the height dimension.
  // Number of output channels produced per input channel.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
}
    135 
// Options for BuiltinOperator.CONCAT_EMBEDDINGS.
table ConcatEmbeddingsOptions {
  num_channels:int;
  num_columns_per_channel:[int];
  embedding_dim_per_channel:[int]; // This could be inferred from parameters.
}
    141 
// Projection variant for BuiltinOperator.LSH_PROJECTION.
enum LSHProjectionType: byte {
  UNKNOWN = 0,
  SPARSE = 1,
  DENSE = 2,
}
    147 
// Options for BuiltinOperator.LSH_PROJECTION.
table LSHProjectionOptions {
  type: LSHProjectionType;
}
    151 
// Options for BuiltinOperator.SVDF.
table SVDFOptions {
  rank:int;
  fused_activation_function:ActivationFunctionType;
}
    156 
// An implementation of TensorFlow BasicRNNCell.
table BasicRNNOptions {
  fused_activation_function:ActivationFunctionType;
}
    161 
// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
table FullyConnectedOptions {
  fused_activation_function:ActivationFunctionType;
}
    166 
// Options for BuiltinOperator.SOFTMAX.
table SoftmaxOptions {
  beta: float;  // Scaling factor applied to the logits before the softmax.
}
    170 
// An implementation of TensorFlow concat.
table ConcatenationOptions {
  axis:int;  // Dimension along which inputs are concatenated.
  fused_activation_function:ActivationFunctionType;
}
    176 
// Options for BuiltinOperator.ADD.
table AddOptions {
  fused_activation_function:ActivationFunctionType;
}
    180 
// Options for BuiltinOperator.L2NORM.
table L2NormOptions {
  fused_activation_function:ActivationFunctionType;
}
    184 
// Options for BuiltinOperator.LOCAL_RESPONSE_NORM.
// NOTE(review): field names mirror TensorFlow's local_response_normalization
// (depth_radius/bias/alpha/beta) — confirm semantics match before relying on
// them.
table LocalResponseNormOptions {
  radius:int;
  bias:float;
  alpha:float;
  beta:float;
}
    191 
// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
table LSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float; // Optional, 0.0 means no clipping
  proj_clip: float; // Optional, 0.0 means no clipping
}
    198 
// Options for BuiltinOperator.RESIZE_BILINEAR.
table ResizeBilinearOptions {
  new_height:int;  // Output height in pixels.
  new_width:int;   // Output width in pixels.
}
    203 
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  builtin_code:BuiltinOperator;
  // Name of the custom op; presumably only meaningful when builtin_code is
  // CUSTOM — confirm against the interpreter.
  custom_code:string;
}
    210 
// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operations is configured using builtin_options
// or custom_options.
table Operator {
  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.
  opcode_index:int;

  // Indices into Model.tensors for this op's input and output tensors.
  inputs:[int];
  outputs:[int];

  builtin_options:BuiltinOptions;  // For builtin ops; one union member.
  custom_options:[ubyte];          // Opaque payload for CUSTOM ops.
}
    226 
// The root type, defining a model.
table Model {
  // A list of all tensors used in this model.
  tensors:[Tensor];

  // Indices of the input tensors.
  inputs:[int];

  // Indices of the output tensors.
  outputs:[int];

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All operators, in execution order.
  operators:[Operator];
}

// Serialized buffers have Model as their root table.
root_type Model;
    248