/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/simple_memory_arena.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

namespace {

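// Rounds `offset` up to the nearest multiple of `alignment`. For example,
// AlignTo(4, 9) returns 12, while AlignTo(8, 16) returns 16 (already aligned).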
template <typename T>
T AlignTo(size_t alignment, T offset) {
  return offset % alignment == 0 ? offset
                                 : offset + (alignment - offset % alignment);
}

}  // namespace

namespace tflite {

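// Reserves a region of `size` bytes with the given alignment, preferring the
// smallest gap between existing allocations that fits (best-fit) and falling
// back to the end of the arena. Only an offset is recorded, so the result
// stays valid when the backing buffer is reallocated by Commit().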
TfLiteStatus SimpleMemoryArena::Allocate(TfLiteContext* context,
                                         size_t alignment, size_t size,
                                         ArenaAlloc* new_alloc) {
  TF_LITE_ENSURE(context, alignment <= arena_alignment_);

  if (size == 0) {
    new_alloc->offset = 0;
    new_alloc->size = 0;
    return kTfLiteOk;
  }

  size_t current_top = 0;

  if (!allocs_.empty()) {
    auto last = allocs_.rbegin();
    current_top = last->offset + last->size;
  }

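  // Example: with existing allocs at [0, 8) and [16, 24), a request for
  // size 8 with alignment 4 fits the gap at offset 8 exactly, so it is
  // placed there rather than extending the arena past offset 24.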
  // If we don't find a better gap, just allocate at the end of the buffer.
  size_t best_offset = AlignTo(alignment, current_top);
  size_t best_offset_fit = std::numeric_limits<size_t>::max();
  auto best_insertion_it = allocs_.end();

  // Go through the sorted allocs and look at the gaps between them.
  size_t current_offset = 0;
  for (auto it = allocs_.begin(); it != allocs_.end(); ++it) {
    size_t aligned_current_offset = AlignTo(alignment, current_offset);
    // If we find a gap that is large enough for the requested size and
    // smaller than the previous best fit, take it.
    if (aligned_current_offset + size <= it->offset &&
        it->offset - current_offset < best_offset_fit) {
      best_offset = aligned_current_offset;
      best_offset_fit = it->offset - current_offset;
      best_insertion_it = it;
    }
    current_offset = it->offset + it->size;
  }

  // Update the required buffer size.
  high_water_mark_ = std::max(high_water_mark_, best_offset + size);

  new_alloc->offset = best_offset;
  new_alloc->size = size;
  allocs_.insert(best_insertion_it, *new_alloc);

  return kTfLiteOk;
}

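// Releases a previously recorded region by erasing its entry from the sorted
// alloc list. The entry is matched by offset and must match exactly once.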
TfLiteStatus SimpleMemoryArena::Deallocate(TfLiteContext* context,
                                           const ArenaAlloc& alloc) {
  if (alloc.size == 0) {
    return kTfLiteOk;
  }

  int erased_allocs_count = 0;
  auto it = allocs_.begin();
  while (it != allocs_.end()) {
    if (it->offset == alloc.offset) {
      TF_LITE_ENSURE_EQ(context, it->size, alloc.size);
      erased_allocs_count++;
      it = allocs_.erase(it);
    } else {
      ++it;
    }
  }
  TF_LITE_ENSURE_EQ(context, erased_allocs_count, 1);
  return kTfLiteOk;
}

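// Ensures the backing buffer is large enough for the current high-water mark.
// The buffer only ever grows; existing contents are copied over so that
// offset-based allocations remain valid after the resize.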
TfLiteStatus SimpleMemoryArena::Commit(TfLiteContext* context) {
  size_t required_size = RequiredBufferSize();
  if (required_size > underlying_buffer_size_) {
    char* new_alloc = new char[required_size];
    char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
        AlignTo(arena_alignment_, reinterpret_cast<intptr_t>(new_alloc)));

    // If the arena had been previously allocated, copy over the old memory.
    // Since Alloc pointers are offset based, they will remain valid in the
    // new memory block.
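    // Copy whichever aligned extent is smaller: the alignment padding at the
    // start of the old and new raw buffers may differ, so the two usable
    // extents need not be equal.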
    if (high_water_mark_ > 0 && underlying_buffer_size_ > 0) {
      size_t copy_amount = std::min(
          underlying_buffer_.get() + underlying_buffer_size_ -
              underlying_buffer_aligned_ptr_,
          new_alloc + required_size - new_underlying_buffer_aligned_ptr);
      memcpy(new_underlying_buffer_aligned_ptr, underlying_buffer_aligned_ptr_,
             copy_amount);
    }

    underlying_buffer_.reset(new_alloc);
    underlying_buffer_size_ = required_size;
    underlying_buffer_aligned_ptr_ = new_underlying_buffer_aligned_ptr;
  }
  committed_ = true;
  return underlying_buffer_ != nullptr ? kTfLiteOk : kTfLiteError;
}

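// Translates an offset-based allocation into an absolute pointer within the
// committed buffer. Zero-sized allocations resolve to nullptr.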
TfLiteStatus SimpleMemoryArena::ResolveAlloc(TfLiteContext* context,
                                             const ArenaAlloc& alloc,
                                             char** output_ptr) {
  TF_LITE_ENSURE(context, committed_);
  TF_LITE_ENSURE(context, output_ptr != nullptr);
  if (alloc.size == 0) {
    *output_ptr = nullptr;
  } else {
    *output_ptr = underlying_buffer_aligned_ptr_ + alloc.offset;
  }
  return kTfLiteOk;
}

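// Forgets all allocations and the high-water mark, but deliberately keeps the
// underlying buffer so it can be reused by a subsequent Commit().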
TfLiteStatus SimpleMemoryArena::Clear() {
  committed_ = false;
  high_water_mark_ = 0;
  allocs_.clear();
  return kTfLiteOk;
}

}  // namespace tflite

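// Illustrative usage (a sketch, not part of this file; assumes the
// SimpleMemoryArena constructor takes the arena alignment, per
// arena_alignment_ above, and that `context` is a valid TfLiteContext*):
//
//   tflite::SimpleMemoryArena arena(/*arena_alignment=*/64);
//   tflite::ArenaAlloc a, b;
//   arena.Allocate(context, /*alignment=*/4, /*size=*/64, &a);
//   arena.Allocate(context, /*alignment=*/4, /*size=*/128, &b);
//   arena.Commit(context);  // Sizes and (re)allocates the backing buffer.
//   char* a_ptr = nullptr;
//   arena.ResolveAlloc(context, a, &a_ptr);  // a_ptr points at a.offset.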