/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TF_LITE_KERNELS_INTERNAL_OPTIMIZED_TENSOR_UTILS_IMPL_H_
#define TF_LITE_KERNELS_INTERNAL_OPTIMIZED_TENSOR_UTILS_IMPL_H_

// TODO(ghodrat): Remove this header file and the dependency on internal data
// structures.
#include "tensorflow/contrib/lite/builtin_op_data.h"

#ifndef USE_NEON
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#endif  // defined(__ARM_NEON__) || defined(__ARM_NEON)
#endif  // USE_NEON

namespace tflite {
namespace tensor_utils {

// Multiply a matrix by a batch vector and accumulate the results into a
// batch-size result vector, written out with the given result_stride.
void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                 int m_rows, int m_cols,
                                                 const float* vector,
                                                 int n_batch, float* result,
                                                 int result_stride);
void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                             int m_cols, const float* vector,
                                             int n_batch, float* result,
                                             int result_stride);
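//
// A hedged sketch of the intended semantics (the actual portable and NEON
// implementations live in the corresponding .cc files and may traverse the
// data in a different order):
//
//   for (int b = 0; b < n_batch; ++b) {
//     for (int r = 0; r < m_rows; ++r) {
//       float dot = 0.f;
//       for (int c = 0; c < m_cols; ++c) {
//         dot += matrix[r * m_cols + c] * vector[b * m_cols + c];
//       }
//       result[(b * m_rows + r) * result_stride] += dot;
//     }
//   }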

// Cwise product of two vectors.
void PortableVectorVectorCwiseProduct(const float* vector1,
                                      const float* vector2, int v_size,
                                      float* result);
void NeonVectorVectorCwiseProduct(const float* vector1, const float* vector2,
                                  int v_size, float* result);

// Cwise product and accumulate of two vectors. Since it is a MAC operation,
// the assumption here is that the result array is initialized to valid values.
void PortableVectorVectorCwiseProductAccumulate(const float* vector1,
                                                const float* vector2,
                                                int v_size, float* result);
void NeonVectorVectorCwiseProductAccumulate(const float* vector1,
                                            const float* vector2, int v_size,
                                            float* result);
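//
// A minimal sketch of the element-wise semantics of the two variants above
// (the NEON versions are assumed to produce the same result):
//
//   for (int i = 0; i < v_size; ++i) {
//     result[i] = vector1[i] * vector2[i];      // CwiseProduct
//     // result[i] += vector1[i] * vector2[i];  // CwiseProductAccumulate
//   }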

// Dot product of two vectors.
float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
                                     int v_size);
float NeonVectorVectorDotProduct(const float* vector1, const float* vector2,
                                 int v_size);

// Dot product of two batch vectors.
void PortableBatchVectorBatchVectorDotProduct(const float* vector1,
                                              const float* vector2, int v_size,
                                              int n_batch, float* result,
                                              int result_stride);
void NeonBatchVectorBatchVectorDotProduct(const float* vector1,
                                          const float* vector2, int v_size,
                                          int n_batch, float* result,
                                          int result_stride);
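//
// Sketch of the assumed batched behavior: one dot product per batch over the
// corresponding v_size-long slices, written out with result_stride:
//
//   for (int b = 0; b < n_batch; ++b) {
//     float dot = 0.f;
//     for (int i = 0; i < v_size; ++i) {
//       dot += vector1[b * v_size + i] * vector2[b * v_size + i];
//     }
//     result[b * result_stride] = dot;
//   }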

// Cwise product and accumulate of a vector and a batch-vector. Since it is a
// MAC operation, the assumption here is that the result array is initialized
// to valid values.
void PortableVectorBatchVectorCwiseProductAccumulate(const float* vector,
                                                     int v_size,
                                                     const float* batch_vector,
                                                     int n_batch,
                                                     float* result);
void NeonVectorBatchVectorCwiseProductAccumulate(const float* vector,
                                                 int v_size,
                                                 const float* batch_vector,
                                                 int n_batch, float* result);
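//
// Assumed semantics, sketched: the same (non-batched) vector is multiplied
// element-wise into every batch of batch_vector, accumulating into result:
//
//   for (int b = 0; b < n_batch; ++b) {
//     for (int i = 0; i < v_size; ++i) {
//       result[b * v_size + i] += vector[i] * batch_vector[b * v_size + i];
//     }
//   }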

// Compute "1.0f - elements of vector" (used in CIFG).
void PortableSub1Vector(const float* vector, int v_size, float* result);
void NeonSub1Vector(const float* vector, int v_size, float* result);

// Clip elements of a vector using an abs_limit value.
void PortableClipVector(const float* vector, int v_size, float abs_limit,
                        float* result);
void NeonClipVector(const float* vector, int v_size, float abs_limit,
                    float* result);
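//
// Per-element behavior of the two operations above, as a sketch (std::min and
// std::max would come from <algorithm>):
//
//   // Sub1Vector:
//   for (int i = 0; i < v_size; ++i) result[i] = 1.0f - vector[i];
//   // ClipVector:
//   for (int i = 0; i < v_size; ++i)
//     result[i] = std::min(std::max(vector[i], -abs_limit), abs_limit);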

// Initialize a batch vector with n_batch copies of the given vector.
void PortableVectorBatchVectorAssign(const float* vector, int v_size,
                                     int n_batch, float* batch_vector);
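//
// Assumed behavior, sketched: the source vector is replicated into each of the
// n_batch slots of batch_vector (memcpy from <cstring>):
//
//   for (int b = 0; b < n_batch; ++b) {
//     memcpy(batch_vector + b * v_size, vector, v_size * sizeof(float));
//   }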

// Apply sigmoid to elements of a vector.
void PortableApplySigmoidToVector(const float* vector, int v_size,
                                  float* result);

// Apply activation function to elements of a vector.
void PortableApplyActivationToVector(const float* vector, int v_size,
                                     TfLiteFusedActivation activation,
                                     float* result);
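//
// A hedged sketch of the per-element mapping (std::exp/std::tanh from <cmath>;
// the activation cases shown are examples, not an exhaustive switch):
//
//   for (int i = 0; i < v_size; ++i) {
//     result[i] = 1.0f / (1.0f + std::exp(-vector[i]));  // ApplySigmoidToVector
//   }
//   // ApplyActivationToVector, e.g.:
//   //   kTfLiteActRelu: result[i] = std::max(0.0f, vector[i]);
//   //   kTfLiteActTanh: result[i] = std::tanh(vector[i]);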

// Copy vector to another vector.
void PortableCopyVector(const float* vector, int v_size, float* result);

// Fill vector with 0.f.
void PortableZeroVector(float* vector, int v_size);

// Limit a float input f between -abs_limit and +abs_limit.
float PortableClip(float f, float abs_limit);
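//
// The three helpers above reduce to the following sketch (memcpy/memset from
// <cstring>, std::min/std::max from <algorithm>):
//
//   memcpy(result, vector, v_size * sizeof(float));                // CopyVector
//   memset(vector, 0, v_size * sizeof(float));                     // ZeroVector
//   float clipped = std::min(std::max(f, -abs_limit), abs_limit);  // Clip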

// Shift a vector of size v_size left in place; the vacated last element is
// set to shift_value.
void PortableVectorShiftLeft(float* vector, int v_size, float shift_value);
void NeonVectorShiftLeft(float* vector, int v_size, float shift_value);
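//
// Assumed in-place behavior, sketched: each element moves one slot to the
// left and shift_value fills the vacated last slot:
//
//   for (int i = 0; i < v_size - 1; ++i) {
//     vector[i] = vector[i + 1];
//   }
//   vector[v_size - 1] = shift_value;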

// Reduce-sum on a float input vector:
// input_vector: float pointer to input vector.
// output_vector: float pointer to output vector.
// output_size: output vector size.
// reduction_size: number of consecutive elements from input vector which are
// added to get one element of output.
void PortableReductionSumVector(const float* input_vector, float* output_vector,
                                int output_size, int reduction_size);
void NeonReductionSumVector(const float* input_vector, float* output_vector,
                            int output_size, int reduction_size);

}  // namespace tensor_utils
}  // namespace tflite

#endif  // TF_LITE_KERNELS_INTERNAL_OPTIMIZED_TENSOR_UTILS_IMPL_H_