/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "compiled_method.h"
#include "linker/output_stream.h"
#include "oat.h"
#include "oat_quick_method_header.h"

namespace art {
namespace linker {

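// Helper for tracking a single thunk: its compiled code, the offsets at which
// instances of it have been reserved for writing, and the maximum offset at
// which the next instance can be placed while keeping all pending references
// in range.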
class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(std::vector<uint8_t> code, uint32_t max_next_offset)
      : code_(code),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  size_t CodeSize() const {
    return code_.size();
  }

  ArrayRef<const uint8_t> GetCode() const {
    return ArrayRef<const uint8_t>(code_);
  }

  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
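  // Example (illustrative numbers): with CodeSize() == 8u, MaxNextOffset() ==
  // 0x1000u and next_thunk.MaxNextOffset() == 0x1004u at 4-byte alignment,
  // placing this thunk at 0x1000u would end it at 0x1008u, past the start of
  // the next thunk, so max_next_offset_ is lowered to
  // RoundDown(0x1004u - 8u, 4u) == 0xffcu.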
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

 private:
  std::vector<uint8_t> code_;       // The code of the thunk.
  std::vector<uint32_t> offsets_;   // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;        // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;         // The index of the next offset to write.
};

class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
    return lhs->GetPendingOffset() > rhs->GetPendingOffset();
  }
};

uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file.
  // Since we do not know here whether this is the last file or whether the next opportunity
  // to place a thunk will be soon enough, we need to reserve all needed thunks now. Code for
  // subsequent oat files can still call back to them.
  if (!unprocessed_method_call_patches_.empty()) {
    ResolveMethodCalls(offset, MethodReference(nullptr, DexFile::kDexNoIndex));
  }
  for (ThunkData* data : unreserved_thunks_) {
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = data->ReserveOffset(thunk_offset);
  }
  unreserved_thunks_.clear();
  // We also need to delay initializing the `pending_thunks_` until the call to WriteThunks().
  // Check that the `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place.
  DCHECK_EQ(pending_thunks_.capacity(), 0u);
  return offset;
}

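// Write out thunks at their reserved offsets. On the first call, gather all
// thunks with a pending offset into `pending_thunks_` and heapify it so that
// the thunk with the smallest pending offset is at the front; thunks are then
// emitted whenever the current aligned offset matches that pending offset.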
uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (pending_thunks_.capacity() == 0u) {
    if (thunks_.empty()) {
      return offset;
    }
    // First call to WriteThunks(), prepare the thunks for writing.
    pending_thunks_.reserve(thunks_.size());
    for (auto& entry : thunks_) {
      ThunkData* data = &entry.second;
      if (data->HasPendingOffset()) {
        pending_thunks_.push_back(data);
      }
    }
    std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  while (!pending_thunks_.empty() &&
         pending_thunks_.front()->GetPendingOffset() == aligned_offset) {
    // Write alignment bytes and code.
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) {
      return 0u;
    }
    if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) {
      return 0u;
    }
    offset = aligned_offset + pending_thunks_.front()->CodeSize();
    // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap.
    std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    pending_thunks_.back()->MarkPendingOffsetAsWritten();
    if (pending_thunks_.back()->HasPendingOffset()) {
      std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    } else {
      pending_thunks_.pop_back();
    }
    aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  }
  DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset);
  return offset;
}

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherTargetProvider* provider,
                                               InstructionSet instruction_set)
    : provider_(provider),
      instruction_set_(instruction_set),
      thunks_(),
      unprocessed_method_call_patches_(),
      method_call_thunk_(nullptr),
      pending_thunks_() {
}

ArmBaseRelativePatcher::~ArmBaseRelativePatcher() {
  // All work done by member destructors.
}

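// Reserve space for the given method's code. Before laying out the method,
// assign final offsets to any thunks whose MaxNextOffset() would be exceeded
// if the method's code, together with the `max_extra_space` requested by the
// subclass, were placed first.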
uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  // Adjust code size for extra space required by the subclass.
  uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space;
  uint32_t code_offset;
  uint32_t next_aligned_offset;
  while (true) {
    code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
    next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size);
    if (unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
      break;
    }
    ThunkData* thunk = unreserved_thunks_.front();
    if (thunk == method_call_thunk_) {
      ResolveMethodCalls(code_offset, method_ref);
      // This may have changed `method_call_thunk_` data, so re-check if we need to reserve.
      if (unreserved_thunks_.empty() ||
          unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
        break;
      }
      // We need to process the new `front()` whether it's still the `method_call_thunk_` or not.
      thunk = unreserved_thunks_.front();
    }
    unreserved_thunks_.pop_front();
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = thunk->ReserveOffset(thunk_offset);
    if (thunk == method_call_thunk_) {
      // All remaining method call patches will be handled by this thunk.
      DCHECK(!unprocessed_method_call_patches_.empty());
      DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(),
                MaxPositiveDisplacement(ThunkType::kMethodCall));
      unprocessed_method_call_patches_.clear();
    }
  }

  // Process patches and check that adding thunks for the current method did not push any
  // thunks (previously existing or newly added) before `next_aligned_offset`. This is
  // essentially a check that we never compile a method that's too big. The calls or branches
  // from the method should be able to reach beyond the end of the method and over any pending
  // thunks. (The number of different thunks should be relatively low and their code short.)
  ProcessPatches(compiled_method, code_offset);
  CHECK(unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset);

  return offset;
}

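// Calculate the branch displacement for a method call patch. With unsigned
// 32-bit wraparound, an in-range displacement `d` satisfies either
// `d <= max_positive_displacement` (forward branch) or
// `d >= -max_negative_displacement` (backward branch, wrapped around), hence
// the `&&` in the out-of-range check below. For illustration: a displacement
// of -4 appears as 0xfffffffcu, far above any realistic
// `max_positive_displacement` yet still >= `-max_negative_displacement`.
// Out-of-range calls are redirected to the method call thunk, preferring a
// pending (not yet written) instance ahead of the patch and falling back to
// the last written instance behind it.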
uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset,
                                                                 uint32_t target_offset) {
  DCHECK(method_call_thunk_ != nullptr);
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  uint32_t max_positive_displacement = MaxPositiveDisplacement(ThunkType::kMethodCall);
  uint32_t max_negative_displacement = MaxNegativeDisplacement(ThunkType::kMethodCall);
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement && displacement < -max_negative_displacement) {
    // Unwritten thunks have higher offsets, check if it's within range.
    DCHECK(!method_call_thunk_->HasPendingOffset() ||
           method_call_thunk_->GetPendingOffset() > patch_offset);
    if (method_call_thunk_->HasPendingOffset() &&
        method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) {
      displacement = method_call_thunk_->GetPendingOffset() - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK(method_call_thunk_->HasWrittenOffset());
      DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset);
      displacement = method_call_thunk_->LastWrittenOffset() - patch_offset;
      DCHECK_GE(displacement, -max_negative_displacement);
    }
  }
  return displacement;
}

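// Return the offset of the thunk instance that serves the given patch: the
// last written instance if it is close enough behind the patch, otherwise the
// next pending (not yet written) instance ahead of it.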
uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) {
  auto it = thunks_.find(key);
  CHECK(it != thunks_.end());
  const ThunkData& data = it->second;
  if (data.HasWrittenOffset()) {
    uint32_t offset = data.LastWrittenOffset();
    DCHECK_LT(offset, patch_offset);
    if (patch_offset - offset <= MaxNegativeDisplacement(key.GetType())) {
      return offset;
    }
  }
  DCHECK(data.HasPendingOffset());
  uint32_t offset = data.GetPendingOffset();
  DCHECK_GT(offset, patch_offset);
  DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key.GetType()));
  return offset;
}

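// Record thunk requirements for the method's patches. Relative method calls
// share a single method call thunk; Baker read barrier branches get one thunk
// per distinct ThunkKey. A new ThunkData is compiled and queued on first use;
// for an existing thunk we only set a new max next offset when the patch
// cannot be served by an already reserved instance.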
void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method,
                                            uint32_t code_offset) {
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    uint32_t patch_offset = code_offset + patch.LiteralOffset();
    ThunkType key_type = static_cast<ThunkType>(-1);
    ThunkData* old_data = nullptr;
    if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
      key_type = ThunkType::kMethodCall;
      unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
      if (method_call_thunk_ == nullptr) {
        ThunkKey key(key_type, ThunkParams{{ 0u, 0u }});  // NOLINT(whitespace/braces)
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key_type);
        auto it = thunks_.Put(key, ThunkData(CompileThunk(key), max_next_offset));
        method_call_thunk_ = &it->second;
        AddUnreservedThunk(method_call_thunk_);
      } else {
        old_data = method_call_thunk_;
      }
    } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
      ThunkKey key = GetBakerReadBarrierKey(patch);
      key_type = key.GetType();
      auto lb = thunks_.lower_bound(key);
      if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key_type);
        auto it = thunks_.PutBefore(lb, key, ThunkData(CompileThunk(key), max_next_offset));
        AddUnreservedThunk(&it->second);
      } else {
        old_data = &lb->second;
      }
    }
    if (old_data != nullptr) {
      // Shared path where an old thunk may need an update.
      DCHECK(key_type != static_cast<ThunkType>(-1));
      DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset);
      if (old_data->NeedsNextThunk()) {
        // Patches for a method are ordered by literal offset, so if we still need to place
        // this thunk for a previous patch, that thunk shall be in range for this patch.
        DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key_type));
      } else {
        if (!old_data->HasReservedOffset() ||
            patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key_type)) {
          old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key_type));
          AddUnreservedThunk(old_data);
        }
      }
    }
  }
}

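// Insert the thunk into `unreserved_thunks_`, which is kept sorted by
// MaxNextOffset(), then apply MakeSpaceBefore() adjustments so that each
// preceding thunk's code still fits before its successor's maximum placement
// offset.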
void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) {
  DCHECK(data->NeedsNextThunk());
  size_t index = unreserved_thunks_.size();
  while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) {
    --index;
  }
  unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data);
  // We may need to update the max next offset(s) if the thunk code would not fit.
  size_t alignment = GetInstructionSetAlignment(instruction_set_);
  if (index + 1u != unreserved_thunks_.size()) {
    // Note: Ignore the return value as we need to process previous thunks regardless.
    data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment);
  }
  // Make space for previous thunks. Once we find a pending thunk that does
  // not need an adjustment, we can stop.
  while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) {
    --index;
    data = unreserved_thunks_[index];
  }
}

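// Resolve pending method call patches against known target offsets, dropping
// those that are reachable directly or through an already reserved thunk.
// If unresolved patches remain, conservatively extend the method call thunk's
// max next offset only when it can be moved to the end of
// `unreserved_thunks_` without overlapping other thunks; if all patches are
// resolved, the thunk is no longer needed and is removed.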
void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
                                                MethodReference method_ref) {
  DCHECK(!unreserved_thunks_.empty());
  DCHECK(!unprocessed_method_call_patches_.empty());
  DCHECK(method_call_thunk_ != nullptr);
  uint32_t max_positive_displacement = MaxPositiveDisplacement(ThunkType::kMethodCall);
  uint32_t max_negative_displacement = MaxNegativeDisplacement(ThunkType::kMethodCall);
  // Process as many patches as possible, stop only on unresolved targets or calls too far back.
  while (!unprocessed_method_call_patches_.empty()) {
    MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod();
    uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset();
    DCHECK(!method_call_thunk_->HasReservedOffset() ||
           method_call_thunk_->LastReservedOffset() <= patch_offset);
    if (!method_call_thunk_->HasReservedOffset() ||
        patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) {
      // No previous thunk in range, check if we can reach the target directly.
      if (target_method.dex_file == method_ref.dex_file &&
          target_method.dex_method_index == method_ref.dex_method_index) {
        DCHECK_GT(quick_code_offset, patch_offset);
        if (quick_code_offset - patch_offset > max_positive_displacement) {
          break;
        }
      } else {
        auto result = provider_->FindMethodOffset(target_method);
        if (!result.first) {
          break;
        }
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement);
        } else if (patch_offset - target_offset > max_negative_displacement) {
          break;
        }
      }
    }
    unprocessed_method_call_patches_.pop_front();
  }
  if (!unprocessed_method_call_patches_.empty()) {
    // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively only if
    // the thunk shall be at the end of the `unreserved_thunks_` to avoid dealing with overlaps.
    uint32_t new_max_next_offset =
        unprocessed_method_call_patches_.front().GetPatchOffset() + max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls, we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}

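// The next thunk instance must start no later than the patch offset plus the
// maximum positive displacement, rounded down to the instruction set
// alignment. Illustrative numbers: for patch_offset == 0x1006u,
// MaxPositiveDisplacement(type) == 0x00fffffcu and 4-byte alignment, this
// yields RoundDown(0x01001002u, 4u) == 0x01001000u.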
inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
                                                               ThunkType type) {
  return RoundDown(patch_offset + MaxPositiveDisplacement(type),
                   GetInstructionSetAlignment(instruction_set_));
}

}  // namespace linker
}  // namespace art