/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "swap_space.h"

#include <algorithm>
#include <numeric>
#include <sys/mman.h>
#include <unistd.h>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "thread-inl.h"

namespace art {

// The chunk size by which the swap file is increased and mapped.
static constexpr size_t kMininumMapSize = 16 * MB;

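// When true, Free() cross-checks free_by_start_ and free_by_size_ before and after each
// deallocation. The check walks both sets, so it is disabled by default.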
static constexpr bool kCheckFreeMaps = false;

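// Dump the free list, grouped by chunk size, for diagnostics (used on mmap failure and when a
// consistency check fails).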
template <typename FreeBySizeSet>
static void DumpFreeMap(const FreeBySizeSet& free_by_size) {
  size_t last_size = static_cast<size_t>(-1);
  for (const auto& entry : free_by_size) {
    if (last_size != entry.first) {
      last_size = entry.first;
      LOG(INFO) << "Size " << last_size;
    }
    LOG(INFO) << "  0x" << std::hex << entry.second->Start()
              << " size=" << std::dec << entry.second->size;
  }
}

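// Free chunks are tracked in two sets: free_by_start_ is ordered by address and is used for
// coalescing in Free(), while free_by_size_ is ordered by size and is used for the fit lookup in
// Alloc(). Each free_by_size_ entry carries an iterator into free_by_start_, so the two sets must
// always be updated together.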
void SwapSpace::RemoveChunk(FreeBySizeSet::const_iterator free_by_size_pos) {
  auto free_by_start_pos = free_by_size_pos->second;
  free_by_size_.erase(free_by_size_pos);
  free_by_start_.erase(free_by_start_pos);
}

inline void SwapSpace::InsertChunk(const SpaceChunk& chunk) {
  DCHECK_NE(chunk.size, 0u);
  auto insert_result = free_by_start_.insert(chunk);
  DCHECK(insert_result.second);
  free_by_size_.emplace(chunk.size, insert_result.first);
}

SwapSpace::SwapSpace(int fd, size_t initial_size)
    : fd_(fd),
      size_(0),
      lock_("SwapSpace lock", static_cast<LockLevel>(LockLevel::kDefaultMutexLevel - 1)) {
  // Assume that the file is unlinked.

  InsertChunk(NewFileChunk(initial_size));
}

SwapSpace::~SwapSpace() {
  // Unmap all mmapped chunks. Nothing should be allocated anymore at
  // this point, so there should be only full size chunks in free_by_start_.
  for (const SpaceChunk& chunk : free_by_start_) {
    if (munmap(chunk.ptr, chunk.size) != 0) {
      PLOG(ERROR) << "Failed to unmap swap space chunk at "
                  << static_cast<const void*>(chunk.ptr) << " size=" << chunk.size;
    }
  }
  // All arenas are backed by the same file. Just close the descriptor.
  close(fd_);
}

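// Debug helper: check that both free sets describe the same chunks and return the total number of
// free bytes.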
template <typename FreeByStartSet, typename FreeBySizeSet>
static size_t CollectFree(const FreeByStartSet& free_by_start, const FreeBySizeSet& free_by_size) {
  if (free_by_start.size() != free_by_size.size()) {
    LOG(FATAL) << "Size: " << free_by_start.size() << " vs " << free_by_size.size();
  }

  // Calculate over free_by_size.
  size_t sum1 = 0;
  for (const auto& entry : free_by_size) {
    sum1 += entry.second->size;
  }

  // Calculate over free_by_start.
  size_t sum2 = 0;
  for (const auto& entry : free_by_start) {
    sum2 += entry.size;
  }

  if (sum1 != sum2) {
    LOG(FATAL) << "Sum: " << sum1 << " vs " << sum2;
  }
  return sum1;
}

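// Allocate a block of at least `size` bytes (rounded up to 8-byte alignment) from the free list,
// growing the backing file if no free chunk is large enough.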
void* SwapSpace::Alloc(size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  // Check the free list for something that fits.
  // TODO: Smarter implementation. Global biggest chunk, ...
  SpaceChunk old_chunk;
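  // Find the first free chunk with at least `size` bytes. The search key needs a valid
  // free_by_start_ iterator as its second element, which is why the empty case is handled
  // separately; free_by_start_.begin() is used only to form that key.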
  auto it = free_by_start_.empty()
      ? free_by_size_.end()
      : free_by_size_.lower_bound(FreeBySizeEntry { size, free_by_start_.begin() });
  if (it != free_by_size_.end()) {
    old_chunk = *it->second;
    RemoveChunk(it);
  } else {
    // Not a big enough free chunk, need to increase file size.
    old_chunk = NewFileChunk(size);
  }

  void* ret = old_chunk.ptr;

  if (old_chunk.size != size) {
    // Insert the remainder.
    SpaceChunk new_chunk = { old_chunk.ptr + size, old_chunk.size - size };
    InsertChunk(new_chunk);
  }

  return ret;
}

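// Grow the backing file and map the newly added region. The caller is responsible for returning
// any unused tail of the new chunk to the free list.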
SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) {
#if !defined(__APPLE__)
  size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));
  int result = TEMP_FAILURE_RETRY(ftruncate64(fd_, size_ + next_part));
  if (result != 0) {
    PLOG(FATAL) << "Unable to increase swap file.";
  }
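  // Map the new region at the old end of the file. MAP_SHARED keeps the pages file-backed, so
  // under memory pressure they can be written out to the swap file instead of staying resident.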
  uint8_t* ptr = reinterpret_cast<uint8_t*>(
      mmap(nullptr, next_part, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, size_));
  if (ptr == MAP_FAILED) {
    LOG(ERROR) << "Unable to mmap new swap file chunk.";
    LOG(ERROR) << "Current size: " << size_ << " requested: " << next_part << "/" << min_size;
    LOG(ERROR) << "Free list:";
    DumpFreeMap(free_by_size_);
    LOG(ERROR) << "In free list: " << CollectFree(free_by_start_, free_by_size_);
    LOG(FATAL) << "Aborting...";
  }
  size_ += next_part;
  SpaceChunk new_chunk = {ptr, next_part};
  return new_chunk;
#else
  UNUSED(min_size, kMininumMapSize);
  LOG(FATAL) << "No swap file support on the Mac.";
  UNREACHABLE();
#endif
}

// TODO: Full coalescing.
void SwapSpace::Free(void* ptr, size_t size) {
  MutexLock lock(Thread::Current(), lock_);
  size = RoundUp(size, 8U);

  size_t free_before = 0;
  if (kCheckFreeMaps) {
    free_before = CollectFree(free_by_start_, free_by_size_);
  }

  SpaceChunk chunk = { reinterpret_cast<uint8_t*>(ptr), size };
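  // Find the first free chunk at or after the freed block. Only that chunk and its immediate
  // predecessor can border the freed block, so they are the only merge candidates.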
  auto it = free_by_start_.lower_bound(chunk);
  if (it != free_by_start_.begin()) {
    auto prev = it;
    --prev;
    CHECK_LE(prev->End(), chunk.Start());
    if (prev->End() == chunk.Start()) {
      // Merge *prev with this chunk.
      chunk.size += prev->size;
      chunk.ptr -= prev->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { prev->size, prev });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "prev" is invalidated but "it" remains valid.
    }
  }
  if (it != free_by_start_.end()) {
    CHECK_LE(chunk.End(), it->Start());
    if (chunk.End() == it->Start()) {
      // Merge *it with this chunk.
      chunk.size += it->size;
      auto erase_pos = free_by_size_.find(FreeBySizeEntry { it->size, it });
      DCHECK(erase_pos != free_by_size_.end());
      RemoveChunk(erase_pos);
      // "it" is invalidated but we don't need it anymore.
    }
  }
  InsertChunk(chunk);

  if (kCheckFreeMaps) {
    size_t free_after = CollectFree(free_by_start_, free_by_size_);

    if (free_after != free_before + size) {
      DumpFreeMap(free_by_size_);
      CHECK_EQ(free_after, free_before + size)
          << "Should be " << size << " difference from " << free_before;
    }
  }
}

}  // namespace art