/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file defines a C API for implementing operations in tflite.
// These operations can be defined using C++, but the interface between
// the interpreter and the operations is C.
//
// Summary of abstractions
// TF_LITE_ENSURE - Self-sufficient error checking
// TfLiteStatus - Status reporting
// TfLiteIntArray - stores tensor shapes (dims)
// TfLiteContext - allows an op to access the tensors
// TfLiteTensor - tensor (a multidimensional array)
// TfLiteNode - a single node or operation
// TfLiteRegistration - the implementation of a conceptual operation.
//
// Some abstractions in this file are created and managed by Interpreter.
#ifndef TENSORFLOW_LITE_C_C_API_INTERNAL_H_
#define TENSORFLOW_LITE_C_C_API_INTERNAL_H_

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

typedef enum { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;

// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controlled by one of the
// corresponding support files.
typedef enum {
  kTfLiteEigenContext = 0,     // include eigen_support.h to use.
  kTfLiteGemmLowpContext = 1,  // include gemm_support.h to use.
  kTfLiteEdgeTpuContext = 2,   // Placeholder for Edge TPU support.
  kTfLiteMaxExternalContexts = 3
} TfLiteExternalContextType;

// An external context is a collection of information unrelated to the TF Lite
// framework, but useful to a subset of the ops. TF Lite knows very little
// about the actual contexts, but it keeps a list of them, and is able to
// refresh them if configurations like the number of recommended threads
// change.
typedef struct {
  TfLiteExternalContextType type;
  TfLiteStatus (*Refresh)(struct TfLiteContext* context);
} TfLiteExternalContext;

// Forward declare so GetNode can use this in Context.
typedef struct _TfLiteRegistration TfLiteRegistration;
typedef struct _TfLiteDelegate TfLiteDelegate;

#define kOptionalTensor (-1)

// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
// indices.
typedef struct {
  int size;
// gcc 6.1+ has a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  int data[0];
#else
  int data[];
#endif
} TfLiteIntArray;

// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
int TfLiteIntArrayGetSizeInBytes(int size);

// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size);

// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
int TfLiteIntArrayEqual(TfLiteIntArray* a, TfLiteIntArray* b);

// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
int TfLiteIntArrayEqualsArray(TfLiteIntArray* a, int b_size, int b_data[]);

// Create a copy of an array passed as `src`.
// You are expected to free memory with TfLiteIntArrayFree.
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);

// Free memory of array `a`.
void TfLiteIntArrayFree(TfLiteIntArray* a);
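
// The following usage sketch is illustrative only (not part of the API
// surface); it assumes the allocation helpers above behave as documented.
//
//   // Build a shape {3, 2}, compare it to a plain array, then release it.
//   TfLiteIntArray* dims = TfLiteIntArrayCreate(2);
//   dims->data[0] = 3;
//   dims->data[1] = 2;
//   int expected[] = {3, 2};
//   if (TfLiteIntArrayEqualsArray(dims, 2, expected)) {
//     // Shapes match.
//   }
//   TfLiteIntArrayFree(dims);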

// Fixed size list of floats. Used for per-channel quantization.
typedef struct {
  int size;
// gcc 6.1+ has a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  float data[0];
#else
  float data[];
#endif
} TfLiteFloatArray;

// Given the size (number of elements) in a TfLiteFloatArray, calculate its size
// in bytes.
int TfLiteFloatArrayGetSizeInBytes(int size);

// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteFloatArrayFree().
TfLiteFloatArray* TfLiteFloatArrayCreate(int size);

// Free memory of array `a`.
void TfLiteFloatArrayFree(TfLiteFloatArray* a);

// Since we must not depend on any libraries, define a minimal subset of
// error macros while avoiding names that have pre-conceived meanings like
// assert and check.

// Check whether value is true, and if not return kTfLiteError from
// the current function (and report the error string msg).
#define TF_LITE_ENSURE_MSG(context, value, msg)            \
  do {                                                     \
    if (!(value)) {                                        \
      (context)->ReportError((context), __FILE__ " " msg); \
      return kTfLiteError;                                 \
    }                                                      \
  } while (0)

// Check whether the value `a` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
#define TF_LITE_ENSURE(context, a)                                          \
  do {                                                                      \
    if (!(a)) {                                                             \
      (context)->ReportError((context), "%s:%d %s was not true.", __FILE__, \
                             __LINE__, #a);                                 \
      return kTfLiteError;                                                  \
    }                                                                       \
  } while (0)

#define TF_LITE_ENSURE_STATUS(a) \
  do {                           \
    if ((a) != kTfLiteOk) {      \
      return kTfLiteError;       \
    }                            \
  } while (0)

// Check whether the value `a == b` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
#define TF_LITE_ENSURE_EQ(context, a, b)                                       \
  do {                                                                         \
    if ((a) != (b)) {                                                          \
      (context)->ReportError((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
                             __LINE__, #a, #b, (a), (b));                      \
      return kTfLiteError;                                                     \
    }                                                                          \
  } while (0)

#define TF_LITE_ENSURE_TYPES_EQ(context, a, b)                                 \
  do {                                                                         \
    if ((a) != (b)) {                                                          \
      (context)->ReportError((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
                             __LINE__, #a, #b, TfLiteTypeGetName(a),           \
                             TfLiteTypeGetName(b));                            \
      return kTfLiteError;                                                     \
    }                                                                          \
  } while (0)

#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    if ((status) != kTfLiteOk) {           \
      return kTfLiteError;                 \
    }                                      \
  } while (0)
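
// Illustrative sketch of how these macros are typically used inside an op's
// prepare function. The op and function name here are hypothetical; only the
// macros and types come from this header.
//
//   TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
//     TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
//     const TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
//     TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
//     TF_LITE_ENSURE_MSG(context, input->dims->size <= 4,
//                        "Only tensors up to 4D are supported.");
//     return kTfLiteOk;
//   }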

// Single-precision complex data type compatible with the C99 definition.
typedef struct {
  float re, im;  // real and imaginary parts, respectively.
} TfLiteComplex64;

// Types supported by tensors.
typedef enum {
  kTfLiteNoType = 0,
  kTfLiteFloat32 = 1,
  kTfLiteInt32 = 2,
  kTfLiteUInt8 = 3,
  kTfLiteInt64 = 4,
  kTfLiteString = 5,
  kTfLiteBool = 6,
  kTfLiteInt16 = 7,
  kTfLiteComplex64 = 8,
  kTfLiteInt8 = 9,
} TfLiteType;

// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);

// SupportedQuantizationTypes.
typedef enum {
  // No quantization.
  kTfLiteNoQuantization = 0,
  // Affine quantization (with support for per-channel quantization).
  // Corresponds to TfLiteAffineQuantization.
  kTfLiteAffineQuantization = 1,
} TfLiteQuantizationType;

// Structure specifying the quantization used by the tensor, if any.
typedef struct {
  // The type of quantization held by params.
  TfLiteQuantizationType type;
  // Holds a reference to one of the quantization param structures specified
  // below.
  void* params;
} TfLiteQuantization;

// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified this field will still be populated in
// addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
//     real_value = scale * (quantized_value - zero_point)
typedef struct {
  float scale;
  int32_t zero_point;
} TfLiteQuantizationParams;
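
// A worked example of the formula above (illustrative numbers only): with
// scale = 0.5 and zero_point = 10, a quantized value of 14 maps back to
// real_value = 0.5 * (14 - 10) = 2.0.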

// Parameters for asymmetric quantization across a dimension (i.e. per output
// channel quantization).
// quantized_dimension specifies which dimension the scales and zero_points
// correspond to.
// For a particular value in quantized_dimension, quantized values can be
// converted back to float using:
//     real_value = scale * (quantized_value - zero_point)
typedef struct {
  TfLiteFloatArray* scale;
  TfLiteIntArray* zero_point;
  int32_t quantized_dimension;
} TfLiteAffineQuantization;
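
// Illustrative sketch (not part of the API): populating per-channel
// quantization for a tensor with 3 output channels. It assumes the caller
// later hands these arrays over via the tensor's `quantization` field, which
// then owns them.
//
//   TfLiteAffineQuantization* affine =
//       (TfLiteAffineQuantization*)malloc(sizeof(TfLiteAffineQuantization));
//   affine->scale = TfLiteFloatArrayCreate(3);
//   affine->zero_point = TfLiteIntArrayCreate(3);
//   for (int c = 0; c < 3; ++c) {
//     affine->scale->data[c] = 0.5f;   // example scale per channel
//     affine->zero_point->data[c] = 0;
//   }
//   affine->quantized_dimension = 0;   // channels are along dimension 0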

// A union of pointers that points to memory for a given tensor.
typedef union {
  int* i32;
  int64_t* i64;
  float* f;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
  bool* b;
  int16_t* i16;
  TfLiteComplex64* c64;
  int8_t* int8;
} TfLitePtrUnion;

// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
// data (or data externally allocated). kTfLiteArenaRw is arena allocated
// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
typedef enum {
  kTfLiteMemNone = 0,
  kTfLiteMmapRo,
  kTfLiteArenaRw,
  kTfLiteArenaRwPersistent,
  kTfLiteDynamic,
} TfLiteAllocationType;

// Delegates should use zero or positive integers to represent handles.
// -1 is reserved for unallocated status.
typedef int TfLiteBufferHandle;
const TfLiteBufferHandle kTfLiteNullBufferHandle = -1;

// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
typedef struct {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped:
  //  kTfLiteMmapRo: Memory mapped read only (i.e. weights).
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // An opaque pointer to a tflite::MMapAllocation
  const void* allocation;

  // Null-terminated name of this tensor.
  const char* name;

  // The delegate which knows how to handle `buffer_handle`.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteDelegate* delegate;

  // An integer buffer handle that can be handled by `delegate`.
  // The value is valid only when delegate is not null.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteBufferHandle buffer_handle;

  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
  // responsible for setting data_is_stale to true.
  // `delegate->CopyFromBufferHandle` can be called to copy the data from
  // the delegate buffer.
  // WARNING: This is an experimental interface that is subject to change.
  bool data_is_stale;

  // True if the tensor is a variable.
  bool is_variable;

  // Quantization information. Replaces the params field above.
  TfLiteQuantization quantization;
} TfLiteTensor;

// Free data memory of tensor `t`.
void TfLiteTensorDataFree(TfLiteTensor* t);

// Free quantization data.
void TfLiteQuantizationFree(TfLiteQuantization* quantization);

// Free memory of tensor `t`.
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor);

// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
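
// Illustrative sketch (not part of the API): reading a tensor's payload
// through the `data` union member that matches its `type`. The helper name is
// hypothetical.
//
//   float SumFloatTensor(const TfLiteTensor* tensor) {
//     // Assumes the caller already checked tensor->type == kTfLiteFloat32.
//     const int num_elements = (int)(tensor->bytes / sizeof(float));
//     float sum = 0.f;
//     for (int i = 0; i < num_elements; ++i) sum += tensor->data.f[i];
//     return sum;
//   }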

// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
typedef struct {
  // Inputs to this node expressed as indices into the interpreter's tensors.
  TfLiteIntArray* inputs;

  // Outputs to this node expressed as indices into the interpreter's tensors.
  TfLiteIntArray* outputs;

  // Temporary tensors used during the computations. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;

  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteDelegate* delegate;
} TfLiteNode;
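
// Illustrative sketch (not part of the API): a builtin op reading its
// parameters from `builtin_data`. The parameter struct and function names are
// hypothetical; real builtin parameter structs live in builtin_op_data.h.
//
//   typedef struct { int stride; } MyOpParams;  // hypothetical
//
//   TfLiteStatus MyOpInvoke(TfLiteContext* context, TfLiteNode* node) {
//     const MyOpParams* params = (const MyOpParams*)(node->builtin_data);
//     const TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
//     TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
//     // ... use params->stride to compute output->data from input->data ...
//     return kTfLiteOk;
//   }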

typedef struct TfLiteContext {
  // Number of tensors in the context.
  size_t tensors_size;

  // The execution plan contains a list of the node indices in execution
  // order. execution_plan->size is the current number of nodes. And,
  // execution_plan->data[0] is the first node that needs to be run.
  // TfLiteDelegates can traverse the current execution plan by iterating
  // through each member of this array and using GetNodeAndRegistration() to
  // access details about a node. i.e.
  // TfLiteIntArray* execution_plan;
  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
  //    int node_index = execution_plan->data[exec_index];
  //    TfLiteNode* node;
  //    TfLiteRegistration* reg;
  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
  // }
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                   TfLiteIntArray** execution_plan);

  // An array of tensors in the interpreter context (of length `tensors_size`).
  TfLiteTensor* tensors;

  // Opaque full context ptr (an opaque C++ data structure).
  void* impl_;

  // Request that the underlying memory of `tensor` be resized. Updates the
  // dimensions on the tensor.
  // NOTE: ResizeTensor takes ownership of `new_size`.
  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                               TfLiteIntArray* new_size);
  // Request that an error be reported with format string msg.
  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);

  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries.  If
  // non-null, the value pointed to by `first_new_tensor_index` will be set to
  // the index of the first new tensor.
  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                             int* first_new_tensor_index);

  // Get a node and its registration by node_index.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetNodeAndRegistration)(struct TfLiteContext*, int node_index,
                                         TfLiteNode** node,
                                         TfLiteRegistration** registration);

  // Replace ops with one or more stub delegate operations. This function
  // does not take ownership of `nodes_to_replace`.
  TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
      struct TfLiteContext*, TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace, TfLiteDelegate* delegate);

  // Number of threads that are recommended to subsystems like gemmlowp and
  // eigen.
  int recommended_num_threads;

  // Access external contexts by type.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
                                               TfLiteExternalContextType);
  // Set the value of an external context. Does not take ownership of the
  // pointer.
  // WARNING: This is an experimental interface that is subject to change.
  void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
                             TfLiteExternalContext*);

  // Flag for allowing float16 precision for FP32 calculation.
  // default: false.
  // WARNING: This is an experimental API and subject to change.
  bool allow_fp32_relax_to_fp16;

  // Pointer to the op-level profiler, if set; nullptr otherwise.
  void* profiler;
} TfLiteContext;
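
// Illustrative sketch (not part of the API): resizing an output tensor from an
// op's prepare function. The op name is hypothetical. Because ResizeTensor
// takes ownership of `new_size`, the caller must not free it.
//
//   TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
//     const TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
//     TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
//     TfLiteIntArray* new_size = TfLiteIntArrayCopy(input->dims);
//     return context->ResizeTensor(context, output, new_size);
//   }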

typedef struct _TfLiteRegistration {
  // Initializes the op from serialized data.
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If a custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
  // or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data` field,
  // accessible within prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

  // The pointer `buffer` is the data previously returned by an init invocation.
  void (*free)(TfLiteContext* context, void* buffer);

  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);

  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);

  // profiling_string is called during summarization of profiling information
  // in order to group executions together. Providing a value here will cause a
  // given op to appear multiple times in the profiling report. This is
  // particularly useful for custom ops that can perform significantly
  // different calculations depending on their `user_data`.
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);

  // Builtin codes. If this kernel refers to a builtin this is the code
  // of the builtin. This is so we can do marshaling to other frameworks like
  // NN API.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int32_t builtin_code;

  // Custom op name. If the op is a builtin, this will be null.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;

  // The version of the op.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int version;
} TfLiteRegistration;
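
// Illustrative sketch (not part of the API): wiring up a custom op. The
// function names are hypothetical; only the TfLiteRegistration fields are
// defined by this header.
//
//   TfLiteRegistration* Register_MY_CUSTOM_OP() {
//     static TfLiteRegistration r = {
//         /*init=*/MyOpInit,        // parses custom_options, returns user_data
//         /*free=*/MyOpFree,        // releases whatever init returned
//         /*prepare=*/MyOpPrepare,  // checks types, resizes outputs
//         /*invoke=*/MyOpInvoke,    // performs the computation
//         /*profiling_string=*/NULL,
//         /*builtin_code=*/0,
//         /*custom_name=*/"MY_CUSTOM_OP",
//         /*version=*/1};
//     return &r;
//   }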

// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
// values should be 1, 2, 4, 8, ...etc.
typedef enum {
  kTfLiteDelegateFlagsNone = 0,
  // This flag is set if the delegate can handle dynamic sized tensors.
  // For example, the output shape of a `Resize` op with non-constant shape
  // can only be inferred when the op is invoked.
  // In this case, the Delegate is responsible for calling
  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
  // `ResizeTensor` when invoking the op.
  //
  // If the delegate isn't capable of handling dynamic tensors, this flag must
  // not be set.
  kTfLiteDelegateFlagsAllowDynamicTensors = 1
} TfLiteDelegateFlags;

// WARNING: This is an experimental interface that is subject to change.
typedef struct _TfLiteDelegate {
  // Data that the delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for deallocating this when it is destroyed.
  void* data_;

  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
  // to ask the TensorFlow Lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context, TfLiteDelegate* delegate);

  // Copy the data from delegate buffer handle into raw memory of the given
  // 'tensor'. This cannot be null. The delegate is allowed to allocate the raw
  // bytes as long as it follows the rules for kTfLiteDynamic tensors.
  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                       TfLiteDelegate* delegate,
                                       TfLiteBufferHandle buffer_handle,
                                       TfLiteTensor* tensor);

  // Copy the data from raw memory of the given 'tensor' to delegate buffer
  // handle. This can be null if the delegate doesn't use its own buffer.
  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                     TfLiteDelegate* delegate,
                                     TfLiteBufferHandle buffer_handle,
                                     TfLiteTensor* tensor);

  // Free the delegate buffer handle. Note: This only frees the handle; it
  // doesn't release the underlying resource (e.g. textures). The resources
  // are either owned by the application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteContext* context, TfLiteDelegate* delegate,
                           TfLiteBufferHandle* handle);

  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;
} TfLiteDelegate;

// Build a 'null' delegate, with all the fields properly set to their default
// values.
TfLiteDelegate TfLiteDelegateCreate();
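
// Illustrative sketch (not part of the API): constructing a delegate whose
// Prepare claims every node in the execution plan. `MyDelegateKernel()` is a
// hypothetical function returning the TfLiteRegistration used for the
// delegate's macro-nodes; `MyDelegateCreate` is likewise hypothetical.
//
//   TfLiteStatus MyDelegatePrepare(TfLiteContext* context,
//                                  TfLiteDelegate* delegate) {
//     TfLiteIntArray* plan;
//     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan));
//     return context->ReplaceNodeSubsetsWithDelegateKernels(
//         context, MyDelegateKernel(), plan, delegate);
//   }
//
//   TfLiteDelegate MyDelegateCreate() {
//     TfLiteDelegate d = TfLiteDelegateCreate();
//     d.Prepare = MyDelegatePrepare;
//     d.flags = kTfLiteDelegateFlagsNone;
//     return d;
//   }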

// WARNING: This is an experimental interface that is subject to change.
//
// Currently, TfLiteDelegateParams has to be allocated in a way that it's
// trivially destructible. It will be stored as the `builtin_data` field in the
// `TfLiteNode` of the delegate node.
//
// See also the `CreateDelegateParams` function in `interpreter.cc` for details.
typedef struct {
  TfLiteDelegate* delegate;
  TfLiteIntArray* nodes_to_replace;
  TfLiteIntArray* input_tensors;
  TfLiteIntArray* output_tensors;
} TfLiteDelegateParams;
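
// Illustrative sketch (not part of the API): a delegate kernel reading these
// params back out of `builtin_data`, as described in the comment above. The
// function name is hypothetical.
//
//   TfLiteStatus MyDelegateKernelPrepare(TfLiteContext* context,
//                                        TfLiteNode* node) {
//     const TfLiteDelegateParams* params =
//         (const TfLiteDelegateParams*)(node->builtin_data);
//     // params->nodes_to_replace lists the original nodes this macro-node
//     // replaced; params->input_tensors / output_tensors list its boundary
//     // tensors.
//     return kTfLiteOk;
//   }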

#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus
#endif  // TENSORFLOW_LITE_C_C_API_INTERNAL_H_