/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_
#define TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_

// TODO(ghodrat): Remove this header file and the dependency on internal data
// structures.
#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h"

namespace tflite {
namespace tensor_utils {

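// Note: NEON_OR_PORTABLE (defined in cpu_check.h, included above) expands each
// call below to the Neon-prefixed implementation when NEON support is
// available, and to the Portable-prefixed reference implementation otherwise.
// Both sets of implementations are declared in tensor_utils_impl.h.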
void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                         int m_cols, const float* vector,
                                         int n_batch, float* result,
                                         int result_stride) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vector, n_batch, result, result_stride);
}

void VectorVectorCwiseProduct(const float* vector1, const float* vector2,
                              int v_size, float* result) {
  NEON_OR_PORTABLE(VectorVectorCwiseProduct, vector1, vector2, v_size, result);
}

void VectorVectorCwiseProductAccumulate(const float* vector1,
                                        const float* vector2, int v_size,
                                        float* result) {
  NEON_OR_PORTABLE(VectorVectorCwiseProductAccumulate, vector1, vector2, v_size,
                   result);
}

void VectorBatchVectorCwiseProductAccumulate(const float* vector, int v_size,
                                             const float* batch_vector,
                                             int n_batch, float* result) {
  NEON_OR_PORTABLE(VectorBatchVectorCwiseProductAccumulate, vector, v_size,
                   batch_vector, n_batch, result);
}

float VectorVectorDotProduct(const float* vector1, const float* vector2,
                             int v_size) {
  return NEON_OR_PORTABLE(VectorVectorDotProduct, vector1, vector2, v_size);
}

void BatchVectorBatchVectorDotProduct(const float* vector1,
                                      const float* vector2, int v_size,
                                      int n_batch, float* result,
                                      int result_stride) {
  NEON_OR_PORTABLE(BatchVectorBatchVectorDotProduct, vector1, vector2, v_size,
                   n_batch, result, result_stride);
}

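// The functions below that call Portable* directly have no NEON-specialized
// variant wired up in this file; they always use the portable reference
// implementation.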
void VectorBatchVectorAssign(const float* vector, int v_size, int n_batch,
                             float* batch_vector) {
  PortableVectorBatchVectorAssign(vector, v_size, n_batch, batch_vector);
}

void ApplySigmoidToVector(const float* vector, int v_size, float* result) {
  PortableApplySigmoidToVector(vector, v_size, result);
}

void ApplyActivationToVector(const float* vector, int v_size,
                             TfLiteFusedActivation activation, float* result) {
  PortableApplyActivationToVector(vector, v_size, activation, result);
}

void CopyVector(const float* vector, int v_size, float* result) {
  PortableCopyVector(vector, v_size, result);
}

void Sub1Vector(const float* vector, int v_size, float* result) {
  NEON_OR_PORTABLE(Sub1Vector, vector, v_size, result);
}

void ZeroVector(float* vector, int v_size) {
  PortableZeroVector(vector, v_size);
}

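// Clip clamps a single scalar to [-abs_limit, abs_limit]; ClipVector applies
// the same clamp elementwise and takes the NEON path when available.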
float Clip(float f, float abs_limit) { return PortableClip(f, abs_limit); }

void ClipVector(const float* vector, int v_size, float abs_limit,
                float* result) {
  NEON_OR_PORTABLE(ClipVector, vector, v_size, abs_limit, result);
}

void VectorShiftLeft(float* vector, int v_size, float shift_value) {
  NEON_OR_PORTABLE(VectorShiftLeft, vector, v_size, shift_value);
}

void ReductionSumVector(const float* input_vector, float* output_vector,
                        int output_size, int reduction_size) {
  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,
                   reduction_size);
}

}  // namespace tensor_utils
}  // namespace tflite

#endif  // TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_