/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm/relative_patcher_arm_base.h"

#include "base/stl_util.h"
#include "compiled_method-inl.h"
#include "debug/method_debug_info.h"
#include "dex/dex_file_types.h"
#include "linker/linker_patch.h"
#include "oat.h"
#include "oat_quick_method_header.h"
#include "stream/output_stream.h"

namespace art {
namespace linker {

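// Bookkeeping for a single thunk: its code and debug name, the offsets at which copies of it
// have been (or still need to be) written, and the maximum offset by which the next copy must
// be placed so that all pending references stay in range.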
class ArmBaseRelativePatcher::ThunkData {
 public:
  ThunkData(ArrayRef<const uint8_t> code, const std::string& debug_name, uint32_t max_next_offset)
      : code_(code),
        debug_name_(debug_name),
        offsets_(),
        max_next_offset_(max_next_offset),
        pending_offset_(0u) {
    DCHECK(NeedsNextThunk());  // The data is constructed only when we expect to need the thunk.
  }

  ThunkData(ThunkData&& src) = default;

  size_t CodeSize() const {
    return code_.size();
  }

  ArrayRef<const uint8_t> GetCode() const {
    return code_;
  }

  const std::string& GetDebugName() const {
    return debug_name_;
  }

  bool NeedsNextThunk() const {
    return max_next_offset_ != 0u;
  }

  uint32_t MaxNextOffset() const {
    DCHECK(NeedsNextThunk());
    return max_next_offset_;
  }

  void ClearMaxNextOffset() {
    DCHECK(NeedsNextThunk());
    max_next_offset_ = 0u;
  }

  void SetMaxNextOffset(uint32_t max_next_offset) {
    DCHECK(!NeedsNextThunk());
    max_next_offset_ = max_next_offset;
  }

  // Adjust the MaxNextOffset() down if needed to fit the code before the next thunk.
  // Returns true if it was adjusted, false if the old value was kept.
  bool MakeSpaceBefore(const ThunkData& next_thunk, size_t alignment) {
    DCHECK(NeedsNextThunk());
    DCHECK(next_thunk.NeedsNextThunk());
    DCHECK_ALIGNED_PARAM(MaxNextOffset(), alignment);
    DCHECK_ALIGNED_PARAM(next_thunk.MaxNextOffset(), alignment);
    if (next_thunk.MaxNextOffset() - CodeSize() < MaxNextOffset()) {
      max_next_offset_ = RoundDown(next_thunk.MaxNextOffset() - CodeSize(), alignment);
      return true;
    } else {
      return false;
    }
  }

  uint32_t ReserveOffset(size_t offset) {
    DCHECK(NeedsNextThunk());
    DCHECK_LE(offset, max_next_offset_);
    max_next_offset_ = 0u;  // The reserved offset should satisfy all pending references.
    offsets_.push_back(offset);
    return offset + CodeSize();
  }

  bool HasReservedOffset() const {
    return !offsets_.empty();
  }

  uint32_t LastReservedOffset() const {
    DCHECK(HasReservedOffset());
    return offsets_.back();
  }

  bool HasPendingOffset() const {
    return pending_offset_ != offsets_.size();
  }

  uint32_t GetPendingOffset() const {
    DCHECK(HasPendingOffset());
    return offsets_[pending_offset_];
  }

  void MarkPendingOffsetAsWritten() {
    DCHECK(HasPendingOffset());
    ++pending_offset_;
  }

  bool HasWrittenOffset() const {
    return pending_offset_ != 0u;
  }

  uint32_t LastWrittenOffset() const {
    DCHECK(HasWrittenOffset());
    return offsets_[pending_offset_ - 1u];
  }

  size_t IndexOfFirstThunkAtOrAfter(uint32_t offset) const {
    size_t number_of_thunks = NumberOfThunks();
    for (size_t i = 0; i != number_of_thunks; ++i) {
      if (GetThunkOffset(i) >= offset) {
        return i;
      }
    }
    return number_of_thunks;
  }

  size_t NumberOfThunks() const {
    return offsets_.size();
  }

  uint32_t GetThunkOffset(size_t index) const {
    DCHECK_LT(index, NumberOfThunks());
    return offsets_[index];
  }

 private:
  const ArrayRef<const uint8_t> code_;  // The code of the thunk.
  const std::string debug_name_;        // The debug name of the thunk.
  std::vector<uint32_t> offsets_;       // Offsets at which the thunk needs to be written.
  uint32_t max_next_offset_;            // The maximum offset at which the next thunk can be placed.
  uint32_t pending_offset_;             // The index of the next offset to write.
};

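// Comparator for the `pending_thunks_` heap in WriteThunks(); orders thunks so that the one
// with the smallest pending offset ends up at the top of the heap.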
class ArmBaseRelativePatcher::PendingThunkComparator {
 public:
  bool operator()(const ThunkData* lhs, const ThunkData* rhs) const {
    DCHECK(lhs->HasPendingOffset());
    DCHECK(rhs->HasPendingOffset());
    // The top of the heap is defined to contain the highest element and we want to pick
    // the thunk with the smallest pending offset, so use the reverse ordering, i.e. ">".
    return lhs->GetPendingOffset() > rhs->GetPendingOffset();
  }
};

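// Reserve space for any thunks that must be placed before the given method's code;
// delegates to ReserveSpaceInternal() with no extra space requested.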
uint32_t ArmBaseRelativePatcher::ReserveSpace(uint32_t offset,
                                              const CompiledMethod* compiled_method,
                                              MethodReference method_ref) {
  return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
}

uint32_t ArmBaseRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  // For multi-oat compilations (boot image), ReserveSpaceEnd() is called for each oat file.
  // Since we do not know here whether this is the last file or whether the next opportunity
  // to place a thunk will be soon enough, we need to reserve all needed thunks now. Code for
  // subsequent oat files can still call back to them.
  if (!unprocessed_method_call_patches_.empty()) {
    ResolveMethodCalls(offset, MethodReference(nullptr, dex::kDexNoIndex));
  }
  for (ThunkData* data : unreserved_thunks_) {
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = data->ReserveOffset(thunk_offset);
  }
  unreserved_thunks_.clear();
  // We also need to delay initializing the `pending_thunks_` until the call to WriteThunks().
  // Check that `pending_thunks_.capacity()` indicates that no WriteThunks() has taken place.
  DCHECK_EQ(pending_thunks_.capacity(), 0u);
  return offset;
}

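// Write pending thunks whose reserved offset matches the current (aligned) output position.
// The first call builds the `pending_thunks_` heap from all thunks with pending offsets.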
uint32_t ArmBaseRelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (pending_thunks_.capacity() == 0u) {
    if (thunks_.empty()) {
      return offset;
    }
    // First call to WriteThunks(), prepare the thunks for writing.
    pending_thunks_.reserve(thunks_.size());
    for (auto& entry : thunks_) {
      ThunkData* data = &entry.second;
      if (data->HasPendingOffset()) {
        pending_thunks_.push_back(data);
      }
    }
    std::make_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
  }
  uint32_t aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  while (!pending_thunks_.empty() &&
         pending_thunks_.front()->GetPendingOffset() == aligned_offset) {
    // Write alignment bytes and code.
    uint32_t aligned_code_delta = aligned_offset - offset;
    if (aligned_code_delta != 0u && UNLIKELY(!WriteCodeAlignment(out, aligned_code_delta))) {
      return 0u;
    }
    if (UNLIKELY(!WriteThunk(out, pending_thunks_.front()->GetCode()))) {
      return 0u;
    }
    offset = aligned_offset + pending_thunks_.front()->CodeSize();
    // Mark the thunk as written at the pending offset and update the `pending_thunks_` heap.
    std::pop_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    pending_thunks_.back()->MarkPendingOffsetAsWritten();
    if (pending_thunks_.back()->HasPendingOffset()) {
      std::push_heap(pending_thunks_.begin(), pending_thunks_.end(), PendingThunkComparator());
    } else {
      pending_thunks_.pop_back();
    }
    aligned_offset = CompiledMethod::AlignCode(offset, instruction_set_);
  }
  DCHECK(pending_thunks_.empty() || pending_thunks_.front()->GetPendingOffset() > aligned_offset);
  return offset;
}

std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugInfo(
    uint32_t executable_offset) {
  // For multi-oat compilation (boot image), `thunks_` records thunks for all oat files.
  // To return debug info for the current oat file, we must ignore thunks before the
  // `executable_offset` as they are in the previous oat files and this function must be
  // called before reserving thunk positions for subsequent oat files.
  size_t number_of_thunks = 0u;
  for (auto&& entry : thunks_) {
    const ThunkData& data = entry.second;
    number_of_thunks += data.NumberOfThunks() - data.IndexOfFirstThunkAtOrAfter(executable_offset);
  }
  std::vector<debug::MethodDebugInfo> result;
  result.reserve(number_of_thunks);
  for (auto&& entry : thunks_) {
    const ThunkData& data = entry.second;
    size_t start = data.IndexOfFirstThunkAtOrAfter(executable_offset);
    if (start == data.NumberOfThunks()) {
      continue;
    }
    // Get the base name to use for the first occurrence of the thunk.
    const std::string& base_name = data.GetDebugName();
    for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
      debug::MethodDebugInfo info = {};
      if (i == 0u) {
        info.custom_name = base_name;
      } else {
        // Add a disambiguating tag for subsequent identical thunks. Since `thunks_` also keeps
        // records for thunks in previous oat files, names based on the thunk index are unique
        // across the whole multi-oat output.
        info.custom_name = base_name + "_" + std::to_string(i);
      }
      info.isa = instruction_set_;
      info.is_code_address_text_relative = true;
      info.code_address = data.GetThunkOffset(i) - executable_offset;
      info.code_size = data.CodeSize();
      result.push_back(std::move(info));
    }
  }
  return result;
}

ArmBaseRelativePatcher::ArmBaseRelativePatcher(RelativePatcherThunkProvider* thunk_provider,
                                               RelativePatcherTargetProvider* target_provider,
                                               InstructionSet instruction_set)
    : thunk_provider_(thunk_provider),
      target_provider_(target_provider),
      instruction_set_(instruction_set),
      thunks_(),
      unprocessed_method_call_patches_(),
      method_call_thunk_(nullptr),
      pending_thunks_() {
}

ArmBaseRelativePatcher::~ArmBaseRelativePatcher() {
  // All work done by member destructors.
}

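// Reserve space for the current method and for any thunks that would otherwise fall out of
// range before the next opportunity to place them. `max_extra_space` is additional code space
// that the subclass may require for this method beyond its quick code.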
uint32_t ArmBaseRelativePatcher::ReserveSpaceInternal(uint32_t offset,
                                                      const CompiledMethod* compiled_method,
                                                      MethodReference method_ref,
                                                      uint32_t max_extra_space) {
  // Adjust code size for extra space required by the subclass.
  uint32_t max_code_size = compiled_method->GetQuickCode().size() + max_extra_space;
  uint32_t code_offset;
  uint32_t next_aligned_offset;
  while (true) {
    code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
    next_aligned_offset = compiled_method->AlignCode(code_offset + max_code_size);
    if (unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
      break;
    }
    ThunkData* thunk = unreserved_thunks_.front();
    if (thunk == method_call_thunk_) {
      ResolveMethodCalls(code_offset, method_ref);
      // This may have changed `method_call_thunk_` data, so re-check if we need to reserve.
      if (unreserved_thunks_.empty() ||
          unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset) {
        break;
      }
      // We need to process the new `front()` whether it's still the `method_call_thunk_` or not.
      thunk = unreserved_thunks_.front();
    }
    unreserved_thunks_.pop_front();
    uint32_t thunk_offset = CompiledCode::AlignCode(offset, instruction_set_);
    offset = thunk->ReserveOffset(thunk_offset);
    if (thunk == method_call_thunk_) {
      // All remaining method call patches will be handled by this thunk.
      DCHECK(!unprocessed_method_call_patches_.empty());
      DCHECK_LE(thunk_offset - unprocessed_method_call_patches_.front().GetPatchOffset(),
                MaxPositiveDisplacement(GetMethodCallKey()));
      unprocessed_method_call_patches_.clear();
    }
  }

  // Process patches and check that adding thunks for the current method did not push any
  // thunks (previously existing or newly added) before `next_aligned_offset`. This is
  // essentially a check that we never compile a method that's too big. The calls or branches
  // from the method should be able to reach beyond the end of the method and over any pending
  // thunks. (The number of different thunks should be relatively low and their code short.)
  ProcessPatches(compiled_method, code_offset);
  CHECK(unreserved_thunks_.empty() ||
        unreserved_thunks_.front()->MaxNextOffset() >= next_aligned_offset);

  return offset;
}

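// Compute the branch displacement for a method call patch. If the direct displacement is out of
// range, route the call through the method call thunk: prefer the next pending (unwritten) thunk
// if it is in range, otherwise fall back to the last written thunk behind the patch.
//
// Note on the unsigned range check below: for a target 0x1000 bytes behind the patch, the wrapped
// displacement is 0xfffff000; it exceeds `max_positive_displacement` and it is below the unsigned
// value of `-max_negative_displacement` exactly when 0x1000 > max_negative_displacement, so the
// `&&` condition detects out-of-range displacements in both directions.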
uint32_t ArmBaseRelativePatcher::CalculateMethodCallDisplacement(uint32_t patch_offset,
                                                                 uint32_t target_offset) {
  DCHECK(method_call_thunk_ != nullptr);
  // Unsigned arithmetic with its well-defined overflow behavior is just fine here.
  uint32_t displacement = target_offset - patch_offset;
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // NOTE: With unsigned arithmetic we do mean to use && rather than || below.
  if (displacement > max_positive_displacement && displacement < -max_negative_displacement) {
    // Unwritten thunks have higher offsets, check if it's within range.
    DCHECK(!method_call_thunk_->HasPendingOffset() ||
           method_call_thunk_->GetPendingOffset() > patch_offset);
    if (method_call_thunk_->HasPendingOffset() &&
        method_call_thunk_->GetPendingOffset() - patch_offset <= max_positive_displacement) {
      displacement = method_call_thunk_->GetPendingOffset() - patch_offset;
    } else {
      // We must have a previous thunk then.
      DCHECK(method_call_thunk_->HasWrittenOffset());
      DCHECK_LT(method_call_thunk_->LastWrittenOffset(), patch_offset);
      displacement = method_call_thunk_->LastWrittenOffset() - patch_offset;
      DCHECK_GE(displacement, -max_negative_displacement);
    }
  }
  return displacement;
}

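// Return the offset of the thunk for `key` to be used as the branch target at `patch_offset`:
// the last written thunk if it is still within the negative displacement range, otherwise the
// next pending (not yet written) thunk.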
uint32_t ArmBaseRelativePatcher::GetThunkTargetOffset(const ThunkKey& key, uint32_t patch_offset) {
  auto it = thunks_.find(key);
  CHECK(it != thunks_.end());
  const ThunkData& data = it->second;
  if (data.HasWrittenOffset()) {
    uint32_t offset = data.LastWrittenOffset();
    DCHECK_LT(offset, patch_offset);
    if (patch_offset - offset <= MaxNegativeDisplacement(key)) {
      return offset;
    }
  }
  DCHECK(data.HasPendingOffset());
  uint32_t offset = data.GetPendingOffset();
  DCHECK_GT(offset, patch_offset);
  DCHECK_LE(offset - patch_offset, MaxPositiveDisplacement(key));
  return offset;
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetMethodCallKey() {
  return ThunkKey(ThunkType::kMethodCall);
}

ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey(
    const LinkerPatch& patch) {
  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch);
  return ThunkKey(ThunkType::kBakerReadBarrier,
                  patch.GetBakerCustomValue1(),
                  patch.GetBakerCustomValue2());
}

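// Record thunk requirements for all patches of the given method: create a ThunkData for thunks
// seen for the first time and update the max next offset of an existing thunk when its previous
// copy would be out of range for the new patch.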
void ArmBaseRelativePatcher::ProcessPatches(const CompiledMethod* compiled_method,
                                            uint32_t code_offset) {
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    uint32_t patch_offset = code_offset + patch.LiteralOffset();
    ThunkKey key(static_cast<ThunkType>(-1));
    ThunkData* old_data = nullptr;
    if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
      key = GetMethodCallKey();
      unprocessed_method_call_patches_.emplace_back(patch_offset, patch.TargetMethod());
      if (method_call_thunk_ == nullptr) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.Put(key, ThunkDataForPatch(patch, max_next_offset));
        method_call_thunk_ = &it->second;
        AddUnreservedThunk(method_call_thunk_);
      } else {
        old_data = method_call_thunk_;
      }
    } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
      key = GetBakerThunkKey(patch);
      auto lb = thunks_.lower_bound(key);
      if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
        uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
        auto it = thunks_.PutBefore(lb, key, ThunkDataForPatch(patch, max_next_offset));
        AddUnreservedThunk(&it->second);
      } else {
        old_data = &lb->second;
      }
    }
    if (old_data != nullptr) {
      // Shared path where an old thunk may need an update.
      DCHECK(key.GetType() != static_cast<ThunkType>(-1));
      DCHECK(!old_data->HasReservedOffset() || old_data->LastReservedOffset() < patch_offset);
      if (old_data->NeedsNextThunk()) {
        // Patches for a method are ordered by literal offset, so if we still need to place
        // this thunk for a previous patch, that thunk shall be in range for this patch.
        DCHECK_LE(old_data->MaxNextOffset(), CalculateMaxNextOffset(patch_offset, key));
      } else {
        if (!old_data->HasReservedOffset() ||
            patch_offset - old_data->LastReservedOffset() > MaxNegativeDisplacement(key)) {
          old_data->SetMaxNextOffset(CalculateMaxNextOffset(patch_offset, key));
          AddUnreservedThunk(old_data);
        }
      }
    }
  }
}

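// Insert the thunk into `unreserved_thunks_` sorted by MaxNextOffset() and shrink the max next
// offsets of the new entry and its predecessors as needed so that each thunk's code still fits
// before the thunk that follows it.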
void ArmBaseRelativePatcher::AddUnreservedThunk(ThunkData* data) {
  DCHECK(data->NeedsNextThunk());
  size_t index = unreserved_thunks_.size();
  while (index != 0u && data->MaxNextOffset() < unreserved_thunks_[index - 1u]->MaxNextOffset()) {
    --index;
  }
  unreserved_thunks_.insert(unreserved_thunks_.begin() + index, data);
  // We may need to update the max next offset(s) if the thunk code would not fit.
  size_t alignment = GetInstructionSetAlignment(instruction_set_);
  if (index + 1u != unreserved_thunks_.size()) {
    // Note: Ignore the return value as we need to process previous thunks regardless.
    data->MakeSpaceBefore(*unreserved_thunks_[index + 1u], alignment);
  }
  // Make space for previous thunks. Once we find a pending thunk that does
  // not need an adjustment, we can stop.
  while (index != 0u && unreserved_thunks_[index - 1u]->MakeSpaceBefore(*data, alignment)) {
    --index;
    data = unreserved_thunks_[index];
  }
}

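// Remove method call patches that are already satisfied, either by a previously reserved method
// call thunk in range or by a directly reachable target; then conservatively extend, or clear,
// the max next offset of `method_call_thunk_` based on whether any patches remain unresolved.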
void ArmBaseRelativePatcher::ResolveMethodCalls(uint32_t quick_code_offset,
                                                MethodReference method_ref) {
  DCHECK(!unreserved_thunks_.empty());
  DCHECK(!unprocessed_method_call_patches_.empty());
  DCHECK(method_call_thunk_ != nullptr);
  uint32_t max_positive_displacement = MaxPositiveDisplacement(GetMethodCallKey());
  uint32_t max_negative_displacement = MaxNegativeDisplacement(GetMethodCallKey());
  // Process as many patches as possible; stop only on unresolved targets or calls too far back.
  while (!unprocessed_method_call_patches_.empty()) {
    MethodReference target_method = unprocessed_method_call_patches_.front().GetTargetMethod();
    uint32_t patch_offset = unprocessed_method_call_patches_.front().GetPatchOffset();
    DCHECK(!method_call_thunk_->HasReservedOffset() ||
           method_call_thunk_->LastReservedOffset() <= patch_offset);
    if (!method_call_thunk_->HasReservedOffset() ||
        patch_offset - method_call_thunk_->LastReservedOffset() > max_negative_displacement) {
      // No previous thunk in range, check if we can reach the target directly.
      if (target_method == method_ref) {
        DCHECK_GT(quick_code_offset, patch_offset);
        if (quick_code_offset - patch_offset > max_positive_displacement) {
          break;
        }
      } else {
        auto result = target_provider_->FindMethodOffset(target_method);
        if (!result.first) {
          break;
        }
        uint32_t target_offset = result.second - CompiledCode::CodeDelta(instruction_set_);
        if (target_offset >= patch_offset) {
          DCHECK_LE(target_offset - patch_offset, max_positive_displacement);
        } else if (patch_offset - target_offset > max_negative_displacement) {
          break;
        }
      }
    }
    unprocessed_method_call_patches_.pop_front();
  }
  if (!unprocessed_method_call_patches_.empty()) {
    // Try to adjust the max next offset in `method_call_thunk_`. Do this conservatively, only
    // when the thunk would end up at the end of `unreserved_thunks_`, to avoid dealing with
    // overlaps.
    uint32_t new_max_next_offset =
        unprocessed_method_call_patches_.front().GetPatchOffset() + max_positive_displacement;
    if (new_max_next_offset >
        unreserved_thunks_.back()->MaxNextOffset() + unreserved_thunks_.back()->CodeSize()) {
      method_call_thunk_->ClearMaxNextOffset();
      method_call_thunk_->SetMaxNextOffset(new_max_next_offset);
      if (method_call_thunk_ != unreserved_thunks_.back()) {
        RemoveElement(unreserved_thunks_, method_call_thunk_);
        unreserved_thunks_.push_back(method_call_thunk_);
      }
    }
  } else {
    // We have resolved all method calls, we do not need a new thunk anymore.
    method_call_thunk_->ClearMaxNextOffset();
    RemoveElement(unreserved_thunks_, method_call_thunk_);
  }
}

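// The highest aligned offset at which a thunk for `key` can be placed while remaining reachable
// from a patch at `patch_offset`.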
inline uint32_t ArmBaseRelativePatcher::CalculateMaxNextOffset(uint32_t patch_offset,
                                                               const ThunkKey& key) {
  return RoundDown(patch_offset + MaxPositiveDisplacement(key),
                   GetInstructionSetAlignment(instruction_set_));
}

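// Construct a ThunkData for the given patch with code and debug name obtained from the
// thunk provider.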
inline ArmBaseRelativePatcher::ThunkData ArmBaseRelativePatcher::ThunkDataForPatch(
    const LinkerPatch& patch, uint32_t max_next_offset) {
  ArrayRef<const uint8_t> code;
  std::string debug_name;
  thunk_provider_->GetThunkCode(patch, &code, &debug_name);
  DCHECK(!code.empty());
  return ThunkData(code, debug_name, max_next_offset);
}

}  // namespace linker
}  // namespace art