/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/contrib/lite/simple_memory_arena.h"

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

namespace {

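// Rounds `offset` up to the nearest multiple of `alignment`. For example,
// AlignTo(4, 5) returns 8, while AlignTo(4, 8) returns 8 unchanged. Assumes
// `alignment` is non-zero.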
template <typename T>
T AlignTo(size_t alignment, T offset) {
  return offset % alignment == 0 ? offset
                                 : offset + (alignment - offset % alignment);
}

}  // namespace

namespace tflite {

TfLiteStatus SimpleMemoryArena::Allocate(TfLiteContext* context,
                                         size_t alignment, size_t size,
                                         ArenaAlloc* new_alloc) {
  // The requested alignment cannot exceed the alignment of the arena itself,
  // since allocations are offsets into the aligned underlying buffer.
  TF_LITE_ENSURE(context, alignment <= arena_alignment_);

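  // `allocs_` is kept sorted by offset, so the end of its last entry marks
  // the top of the used region.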
  size_t current_top = 0;

  if (!allocs_.empty()) {
    auto last = allocs_.rbegin();
    current_top = last->offset + last->size;
  }

  // If we don't find a better gap, allocate at the end of the buffer.
  size_t best_offset = AlignTo(alignment, current_top);
  size_t best_offset_fit = std::numeric_limits<size_t>::max();
  auto best_insertion_it = allocs_.end();

  // Go through the sorted allocs and look at the gaps between them.
  size_t current_offset = 0;
  for (auto it = allocs_.begin(); it != allocs_.end(); ++it) {
    size_t aligned_current_offset = AlignTo(alignment, current_offset);
    // If the gap is large enough for the requested size and is a tighter fit
    // than the best gap seen so far, take it.
    if (aligned_current_offset + size <= it->offset &&
        it->offset - current_offset < best_offset_fit) {
      best_offset = aligned_current_offset;
      best_offset_fit = it->offset - current_offset;
      best_insertion_it = it;
    }
    current_offset = it->offset + it->size;
  }

  // Update the required buffer size.
  high_water_mark_ = std::max(high_water_mark_, best_offset + size);

  new_alloc->offset = best_offset;
  new_alloc->size = size;
  // Insert before the allocation that bounds the chosen gap (or at the end),
  // preserving the sorted-by-offset invariant.
  allocs_.insert(best_insertion_it, *new_alloc);

  return kTfLiteOk;
}

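// Removes the given allocation from the arena's bookkeeping. Exactly one
// recorded entry must match the allocation's offset, and its size must agree.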
TfLiteStatus SimpleMemoryArena::Deallocate(TfLiteContext* context,
                                           const ArenaAlloc& alloc) {
  int erased_allocs_count = 0;
  auto it = allocs_.begin();
  while (it != allocs_.end()) {
    if (it->offset == alloc.offset) {
      TF_LITE_ENSURE_EQ(context, it->size, alloc.size);
      erased_allocs_count++;
      it = allocs_.erase(it);
    } else {
      ++it;
    }
  }
  TF_LITE_ENSURE_EQ(context, erased_allocs_count, 1);
  return kTfLiteOk;
}

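// Grows the underlying buffer (if needed) so that all recorded allocations
// fit. Offsets remain valid across a resize; only the base pointer changes.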
TfLiteStatus SimpleMemoryArena::Commit(TfLiteContext* context) {
  size_t required_size = RequiredBufferSize();
  if (required_size > underlying_buffer_size_) {
    char* new_alloc = new char[required_size];
    char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
        AlignTo(arena_alignment_, reinterpret_cast<intptr_t>(new_alloc)));

    // If the arena had been previously allocated, copy over the old memory.
    // Since allocations are offset-based, they remain valid in the new
    // memory block. Copy only as many bytes as are usable in both the old
    // and the new aligned regions.
    if (high_water_mark_ > 0 && underlying_buffer_size_ > 0) {
      size_t copy_amount = std::min(
          underlying_buffer_.get() + underlying_buffer_size_ -
              underlying_buffer_aligned_ptr_,
          new_alloc + required_size - new_underlying_buffer_aligned_ptr);
      memcpy(new_underlying_buffer_aligned_ptr, underlying_buffer_aligned_ptr_,
             copy_amount);
    }

    underlying_buffer_.reset(new_alloc);
    underlying_buffer_size_ = required_size;
    underlying_buffer_aligned_ptr_ = new_underlying_buffer_aligned_ptr;
  }
  commited_ = true;
  return underlying_buffer_ != nullptr ? kTfLiteOk : kTfLiteError;
}

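// Translates an offset-based allocation into an absolute pointer. Valid only
// after Commit(), because committing may move the underlying buffer.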
TfLiteStatus SimpleMemoryArena::ResolveAlloc(TfLiteContext* context,
                                             const ArenaAlloc& alloc,
                                             char** output_ptr) {
  TF_LITE_ENSURE(context, commited_);
  TF_LITE_ENSURE(context, output_ptr != nullptr);
  *output_ptr = underlying_buffer_aligned_ptr_ + alloc.offset;
  return kTfLiteOk;
}

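// Resets the arena's bookkeeping. Note that the underlying buffer is
// retained, so a subsequent Allocate/Commit cycle can reuse it without
// reallocating, as long as it is still large enough.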
TfLiteStatus SimpleMemoryArena::Clear() {
  commited_ = false;
  high_water_mark_ = 0;
  allocs_.clear();
  return kTfLiteOk;
}

}  // namespace tflite
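
// A minimal usage sketch (illustrative only): `context`, the 64-byte arena
// alignment, and the sizes below are assumptions, not part of this file. It
// also assumes the accompanying header's constructor takes the arena
// alignment.
//
//   tflite::SimpleMemoryArena arena(/*arena_alignment=*/64);
//   tflite::ArenaAlloc alloc;
//   arena.Allocate(context, /*alignment=*/4, /*size=*/1024, &alloc);
//   arena.Commit(context);
//   char* data = nullptr;
//   arena.ResolveAlloc(context, alloc, &data);
//   // ... read/write through `data` ...
//   arena.Deallocate(context, alloc);
//   arena.Clear();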