// TensorFlow Lite model schema (FlatBuffers schema file, schema.fbs).
      1 // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
      2 //
      3 // Licensed under the Apache License, Version 2.0 (the "License");
      4 // you may not use this file except in compliance with the License.
      5 // You may obtain a copy of the License at
      6 //
      7 //     http://www.apache.org/licenses/LICENSE-2.0
      8 //
      9 // Unless required by applicable law or agreed to in writing, software
     10 // distributed under the License is distributed on an "AS IS" BASIS,
     11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     12 // See the License for the specific language governing permissions and
     13 // limitations under the License.
     14 
     15 // Revision History
     16 // Version 0: Initial version.
     17 // Version 1: Add subgraphs to schema.
     18 // Version 2: Rename operators to conform to NN API.
     19 // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
     20 
     21 namespace tflite;
     22 
// File identifier (exactly 4 characters, as FlatBuffers requires); the
// trailing digit encodes the schema version (see the revision history above).
     24 file_identifier "TFL3";
     25 // File extension of any written files.
     26 file_extension "tflite";
     27 
// The type of data stored in a tensor.
// NOTE: these enum values are serialized into model files; append new entries
// only — never renumber or reuse existing values.
enum TensorType : byte {
  FLOAT32 = 0,
  FLOAT16 = 1,
  INT32 = 2,
  UINT8 = 3,
  INT64 = 4,
  STRING = 5,
}
     37 
// Parameters for converting a quantized tensor back to float. Given a
// quantized value q, the corresponding float value f should be:
//   f = scale * (q - zero_point)
table QuantizationParameters {
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];      // Multiplier(s) in the dequantization formula above.
  zero_point:[long];  // Quantized value(s) that correspond to float 0.0.
  // NOTE(review): all four fields are vectors; whether multiple entries denote
  // per-channel parameters is not specified here — confirm with the runtime.
}
     47 
// A tensor: its shape, element type, optional constant data (via `buffer`),
// and optional quantization parameters.
table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, number of channels, height, width] (That's
  // Tensorflow's NCHW).
  shape:[int];
  // Element type of the tensor (see TensorType above).
  type:TensorType;
  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always existent empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that the
  // target device is little-endian. In addition, all builtin operators assume
  // the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
  buffer:uint;
  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.
}
     66 
// A list of builtin operators. Builtin operators are slightly faster than
// custom ones, but not by much. Moreover, while custom operators accept an
// opaque object containing configuration parameters, builtins have a
// predetermined set of acceptable options.
// NOTE: these values are serialized into model files; never renumber them.
// The commented-out entries (5, 6, 8, 18, 20) reserve their values for the
// named operators.
enum BuiltinOperator : byte {
  ADD = 0,
  AVERAGE_POOL_2D = 1,
  CONCATENATION = 2,
  CONV_2D = 3,
  DEPTHWISE_CONV_2D = 4,
  // DEPTH_TO_SPACE = 5,
  // DEQUANTIZE = 6,
  EMBEDDING_LOOKUP = 7,
  // FLOOR = 8,
  FULLY_CONNECTED = 9,
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  L2_POOL_2D = 12,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  LOGISTIC = 14,
  LSH_PROJECTION = 15,
  LSTM = 16,
  MAX_POOL_2D = 17,
  // MUL = 18,
  RELU = 19,
  // RELU1=20,
  RELU6 = 21,
  RESHAPE = 22,
  RESIZE_BILINEAR = 23,
  RNN = 24,
  SOFTMAX = 25,
  SPACE_TO_DEPTH = 26,
  SVDF = 27,
  TANH = 28,
  // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
  CONCAT_EMBEDDINGS = 29,
  SKIP_GRAM = 30,
  CALL = 31,
  CUSTOM = 32,

}
    108 
// Options for the builtin operators. Each member is the options table for one
// (or more, e.g. Pool2DOptions) of the builtins above.
// NOTE: a member's position in this union determines the type tag serialized
// into model files, so new members may only be appended — never reordered or
// removed.
union BuiltinOptions {
  Conv2DOptions,
  DepthwiseConv2DOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,
  Pool2DOptions,
  SVDFOptions,
  RNNOptions,
  FullyConnectedOptions,
  SoftmaxOptions,
  ConcatenationOptions,
  AddOptions,
  L2NormOptions,
  LocalResponseNormalizationOptions,
  LSTMOptions,
  ResizeBilinearOptions,
  CallOptions,
  ReshapeOptions,
  SkipGramOptions,
  SpaceToDepthOptions,
}
    131 
// Padding scheme for windowed ops; mirrors TensorFlow's SAME/VALID semantics.
enum Padding : byte { SAME, VALID }
    133 
// Activation that can be fused into an op's output (see the
// `fused_activation_function` fields below). NONE means no activation.
enum ActivationFunctionType : byte {
  NONE = 0,
  RELU = 1,
  RELU1 = 2,
  RELU6 = 3,
  TANH = 4,
  SIGN_BIT = 5,
}
    142 
// Options for the builtin CONV_2D operator.
table Conv2DOptions {
  padding:Padding;  // SAME or VALID.
  stride_w:int;     // Horizontal stride.
  stride_h:int;     // Vertical stride.
  fused_activation_function:ActivationFunctionType;
}
    149 
// Options shared by the 2-D pooling builtins (AVERAGE_POOL_2D, MAX_POOL_2D,
// L2_POOL_2D).
table Pool2DOptions {
  padding:Padding;
  stride_w:int;       // Horizontal stride of the pooling window.
  stride_h:int;       // Vertical stride of the pooling window.
  filter_width:int;   // Width of the pooling window.
  filter_height:int;  // Height of the pooling window.
  fused_activation_function:ActivationFunctionType;
}
    158 
// Options for the builtin DEPTHWISE_CONV_2D operator.
table DepthwiseConv2DOptions {
  padding:Padding;
  stride_w:int;  // Horizontal stride.
  stride_h:int;  // Vertical stride.
  // Presumably the number of output channels per input channel, as in
  // TensorFlow's depthwise_conv2d — confirm against the kernel.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
}
    166 
// Options for the builtin CONCAT_EMBEDDINGS operator.
table ConcatEmbeddingsOptions {
  num_channels:int;                // Number of embedding channels.
  num_columns_per_channel:[int];   // One entry per channel.
  embedding_dim_per_channel:[int]; // This could be inferred from parameters.
}
    172 
// Projection variants for the LSH_PROJECTION operator (see
// LSHProjectionOptions below).
enum LSHProjectionType: byte {
  UNKNOWN = 0,  // Default/unset value.
  SPARSE = 1,
  DENSE = 2,
}
    178 
// Options for the builtin LSH_PROJECTION operator.
table LSHProjectionOptions {
  type: LSHProjectionType;  // SPARSE or DENSE; UNKNOWN means unset.
}
    182 
// Options for the builtin SVDF operator.
table SVDFOptions {
  rank:int;  // Rank of the SVDF approximation — confirm with the kernel.
  fused_activation_function:ActivationFunctionType;
}
    187 
// An implementation of TensorFlow RNNCell.
table RNNOptions {
  // Activation fused into the cell output.
  fused_activation_function:ActivationFunctionType;
}
    192 
// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
table FullyConnectedOptions {
  // Activation fused into the layer output.
  fused_activation_function:ActivationFunctionType;
}
    197 
// Options for the builtin SOFTMAX operator.
table SoftmaxOptions {
  // Scaling factor for the logits — presumably softmax(beta * x); confirm
  // with the kernel implementation.
  beta: float;
}
    201 
// An implementation of TensorFlow concat.
table ConcatenationOptions {
  axis:int;  // Dimension along which the inputs are concatenated.
  fused_activation_function:ActivationFunctionType;
}
    207 
// Options for the builtin ADD operator.
table AddOptions {
  fused_activation_function:ActivationFunctionType;
}
    211 
// Options for the builtin L2_NORMALIZATION operator.
table L2NormOptions {
  fused_activation_function:ActivationFunctionType;
}
    215 
// Options for the builtin LOCAL_RESPONSE_NORMALIZATION operator.
// NOTE(review): parameter names match TensorFlow's
// tf.nn.local_response_normalization (depth_radius, bias, alpha, beta) —
// confirm the exact formula against the kernel.
table LocalResponseNormalizationOptions {
  radius:int;
  bias:float;
  alpha:float;
  beta:float;
}
    222 
// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
table LSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float; // Optional, 0.0 means no clipping
  proj_clip: float; // Optional, 0.0 means no clipping
}
    229 
// Options for the builtin RESIZE_BILINEAR operator.
table ResizeBilinearOptions {
  new_height:int;  // Output height.
  new_width:int;   // Output width.
}
    234 
// Options for the CALL operation, which invokes another subgraph.
table CallOptions {
  // The subgraph index that needs to be called.
  subgraph:uint;
}
    240 
// Options for the builtin RESHAPE operator.
table ReshapeOptions {
  new_shape:[int];  // Target shape of the output tensor.
}
    244 
// Options for the builtin SKIP_GRAM operator.
table SkipGramOptions {
  ngram_size: int;           // Number of tokens per ngram.
  max_skip_size: int;        // Maximum number of tokens that may be skipped.
  // Presumably whether ngrams of every length up to ngram_size are emitted —
  // confirm with the kernel.
  include_all_ngrams: bool;
}
    250 
// Options for the builtin SPACE_TO_DEPTH operator.
table SpaceToDepthOptions {
  block_size: int;  // Size of the spatial block moved into the depth dimension.
}
    254 
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  builtin_code:BuiltinOperator;
  // Name of the custom operator; presumably only meaningful when
  // builtin_code == CUSTOM — confirm with the runtime.
  custom_code:string;
}
    261 
// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.
table Operator {
  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.
  opcode_index:uint;

  // Indices of this operator's input and output tensors (presumably into the
  // enclosing SubGraph's `tensors` vector — confirm with the runtime).
  inputs:[int];
  outputs:[int];

  // Configuration: builtin_options for a builtin opcode, custom_options
  // (opaque bytes) for a custom one.
  builtin_options:BuiltinOptions;
  custom_options:[ubyte];
}
    277 
// A single computation graph: tensors plus operators in execution order.
// (The root type of the file is Model, defined below, which holds one or more
// of these subgraphs.)
table SubGraph {
  // A list of all tensors used in this subgraph.
  tensors:[Tensor];

  // Indices (into `tensors`) of the input tensors.
  inputs:[int];

  // Indices (into `tensors`) of the output tensors.
  outputs:[int];

  // All operators, in execution order.
  operators:[Operator];

  // Name of subgraph (used for debugging).
  name:string;
}
    295 
// Table of raw data buffers (used for constant tensors). Referenced by tensors
// by index. The first entry in Model.buffers must be an empty buffer (see the
// note on Model.buffers below).
table Buffer {
  data:[ubyte];  // Opaque little-endian bytes; layout described at Tensor.buffer.
}
    301 
// The top-level container for a TensorFlow Lite model (see root_type below).
table Model {
  // Version of the schema. Should track the revision history at the top of
  // this file.
  version:uint;

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All the subgraphs of the model. The 0th is assumed to be the main
  // model.
  subgraphs:[SubGraph];

  // A description of the model.
  description:string;

  // Buffers of the model.
  // NOTE: It is required that the first entry in here is always an empty
  // buffer. This is so that the default buffer index of zero in Tensor
  // will always refer to a valid empty buffer.
  buffers:[Buffer];

}
    325 
    326 root_type Model;
    327