/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/arm64/relative_patcher_arm64.h"

#include "arch/arm64/asm_support_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "base/bit_utils.h"
#include "compiled_method-inl.h"
#include "driver/compiler_driver.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "heap_poisoning.h"
#include "linker/linker_patch.h"
#include "linker/output_stream.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
#include "mirror/object.h"
#include "oat.h"
#include "oat_quick_method_header.h"
#include "read_barrier.h"
#include "utils/arm64/assembler_arm64.h"

namespace art {
namespace linker {

namespace {

// Maximum positive and negative displacement for method call measured from the patch location.
// (Signed 28 bit displacement with the last two bits 0 has range [-2^27, 2^27-4] measured from
// the ARM64 PC pointing to the BL.)
constexpr uint32_t kMaxMethodCallPositiveDisplacement = (1u << 27) - 4u;
constexpr uint32_t kMaxMethodCallNegativeDisplacement = (1u << 27);

// Maximum positive and negative displacement for a conditional branch measured from the patch
// location. (Signed 21 bit displacement with the last two bits 0 has range [-2^20, 2^20-4]
// measured from the ARM64 PC pointing to the B.cond.)
constexpr uint32_t kMaxBcondPositiveDisplacement = (1u << 20) - 4u;
constexpr uint32_t kMaxBcondNegativeDisplacement = (1u << 20);

// The ADRP thunk for erratum 843419 is 2 instructions, i.e. 8 bytes.
constexpr uint32_t kAdrpThunkSize = 8u;
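// The thunk body, built in PatchPcRelativeReference() below, is the relocated ADRP followed
// by a branch back to the instruction after the original ADRP:
//   thunk:  ADRP <Xd>, <target page>   // Out-of-line copy of the patched ADRP.
//           B    <patch_offset + 4>    // Resume at the instruction after the ADRP.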

inline bool IsAdrpPatch(const LinkerPatch& patch) {
  switch (patch.GetType()) {
    case LinkerPatch::Type::kCall:
    case LinkerPatch::Type::kCallRelative:
    case LinkerPatch::Type::kBakerReadBarrierBranch:
      return false;
    case LinkerPatch::Type::kMethodRelative:
    case LinkerPatch::Type::kMethodBssEntry:
    case LinkerPatch::Type::kTypeRelative:
    case LinkerPatch::Type::kTypeClassTable:
    case LinkerPatch::Type::kTypeBssEntry:
    case LinkerPatch::Type::kStringRelative:
    case LinkerPatch::Type::kStringInternTable:
    case LinkerPatch::Type::kStringBssEntry:
      return patch.LiteralOffset() == patch.PcInsnOffset();
  }
}

inline uint32_t MaxExtraSpace(size_t num_adrp, size_t code_size) {
  if (num_adrp == 0u) {
    return 0u;
  }
  uint32_t alignment_bytes =
      CompiledMethod::AlignCode(code_size, InstructionSet::kArm64) - code_size;
  return kAdrpThunkSize * num_adrp + alignment_bytes;
}
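// Illustrative numbers, assuming 16-byte code alignment for arm64: code_size = 100 and
// num_adrp = 3 yield 12 alignment bytes plus 3 * 8 = 24 thunk bytes, i.e. at most 36 bytes
// of extra space.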

}  // anonymous namespace

Arm64RelativePatcher::Arm64RelativePatcher(RelativePatcherTargetProvider* provider,
                                           const Arm64InstructionSetFeatures* features)
    : ArmBaseRelativePatcher(provider, InstructionSet::kArm64),
      fix_cortex_a53_843419_(features->NeedFixCortexA53_843419()),
      reserved_adrp_thunks_(0u),
      processed_adrp_thunks_(0u) {
  if (fix_cortex_a53_843419_) {
    adrp_thunk_locations_.reserve(16u);
    current_method_thunks_.reserve(16u * kAdrpThunkSize);
  }
}

uint32_t Arm64RelativePatcher::ReserveSpace(uint32_t offset,
                                            const CompiledMethod* compiled_method,
                                            MethodReference method_ref) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
    return ReserveSpaceInternal(offset, compiled_method, method_ref, 0u);
  }

  // Add thunks for previous method if any.
  if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
    size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
    offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
             kAdrpThunkSize * num_adrp_thunks;
    reserved_adrp_thunks_ = adrp_thunk_locations_.size();
  }

  // Count the number of ADRP insns as the upper bound on the number of thunks needed
  // and use it to reserve space for other linker patches.
  size_t num_adrp = 0u;
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      ++num_adrp;
    }
  }
  ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
  uint32_t max_extra_space = MaxExtraSpace(num_adrp, code.size());
  offset = ReserveSpaceInternal(offset, compiled_method, method_ref, max_extra_space);
  if (num_adrp == 0u) {
    return offset;
  }

  // Now that we have the actual offset where the code will be placed, locate the ADRP insns
  // that actually require the thunk.
  uint32_t quick_code_offset = compiled_method->AlignCode(offset + sizeof(OatQuickMethodHeader));
  uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
  DCHECK(compiled_method != nullptr);
  for (const LinkerPatch& patch : compiled_method->GetPatches()) {
    if (IsAdrpPatch(patch)) {
      uint32_t patch_offset = quick_code_offset + patch.LiteralOffset();
      if (NeedsErratum843419Thunk(code, patch.LiteralOffset(), patch_offset)) {
        adrp_thunk_locations_.emplace_back(patch_offset, thunk_offset);
        thunk_offset += kAdrpThunkSize;
      }
    }
  }
  return offset;
}

uint32_t Arm64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
  if (!fix_cortex_a53_843419_) {
    DCHECK(adrp_thunk_locations_.empty());
  } else {
    // Add thunks for the last method if any.
    if (reserved_adrp_thunks_ != adrp_thunk_locations_.size()) {
      size_t num_adrp_thunks = adrp_thunk_locations_.size() - reserved_adrp_thunks_;
      offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64) +
               kAdrpThunkSize * num_adrp_thunks;
      reserved_adrp_thunks_ = adrp_thunk_locations_.size();
    }
  }
  return ArmBaseRelativePatcher::ReserveSpaceEnd(offset);
}

uint32_t Arm64RelativePatcher::WriteThunks(OutputStream* out, uint32_t offset) {
  if (fix_cortex_a53_843419_) {
    if (!current_method_thunks_.empty()) {
      uint32_t aligned_offset = CompiledMethod::AlignCode(offset, InstructionSet::kArm64);
      if (kIsDebugBuild) {
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        for (size_t i = 0u; i != num_thunks; ++i) {
          const auto& entry = adrp_thunk_locations_[processed_adrp_thunks_ - num_thunks + i];
          CHECK_EQ(entry.second, aligned_offset + i * kAdrpThunkSize);
        }
      }
      uint32_t aligned_code_delta = aligned_offset - offset;
      if (aligned_code_delta != 0u && !WriteCodeAlignment(out, aligned_code_delta)) {
        return 0u;
      }
      if (!WriteMiscThunk(out, ArrayRef<const uint8_t>(current_method_thunks_))) {
        return 0u;
      }
      offset = aligned_offset + current_method_thunks_.size();
      current_method_thunks_.clear();
    }
  }
  return ArmBaseRelativePatcher::WriteThunks(out, offset);
}

void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
                                     uint32_t literal_offset,
                                     uint32_t patch_offset,
                                     uint32_t target_offset) {
  DCHECK_LE(literal_offset + 4u, code->size());
  DCHECK_EQ(literal_offset & 3u, 0u);
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
  DCHECK_EQ(displacement & 3u, 0u);
  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
  insn |= 0x94000000;  // BL

  // Check that we're just overwriting an existing BL.
  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
  // Write the new BL.
  SetInsn(code, literal_offset, insn);
}
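// Illustrative BL encodings (plain arithmetic, not part of the patcher):
//   displacement = +0x100:  insn = 0x94000000 | (0x00000100u >> 2) = 0x94000040  // BL +0x100
//   displacement = -8:      insn = 0x94000000 | ((0xfffffff8u & 0x0fffffffu) >> 2)
//                                = 0x97fffffe                                    // BL -8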

void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
                                                    const LinkerPatch& patch,
                                                    uint32_t patch_offset,
                                                    uint32_t target_offset) {
  DCHECK_EQ(patch_offset & 3u, 0u);
  DCHECK_EQ(target_offset & 3u, 0u);
  uint32_t literal_offset = patch.LiteralOffset();
  uint32_t insn = GetInsn(code, literal_offset);
  uint32_t pc_insn_offset = patch.PcInsnOffset();
  uint32_t disp = target_offset - ((patch_offset - literal_offset + pc_insn_offset) & ~0xfffu);
  bool wide = (insn & 0x40000000) != 0;
  uint32_t shift = wide ? 3u : 2u;
  if (literal_offset == pc_insn_offset) {
    // Check it's an ADRP with imm == 0 (unset).
    DCHECK_EQ((insn & 0xffffffe0u), 0x90000000u)
        << literal_offset << ", " << pc_insn_offset << ", 0x" << std::hex << insn;
    if (fix_cortex_a53_843419_ && processed_adrp_thunks_ != adrp_thunk_locations_.size() &&
        adrp_thunk_locations_[processed_adrp_thunks_].first == patch_offset) {
      DCHECK(NeedsErratum843419Thunk(ArrayRef<const uint8_t>(*code),
                                     literal_offset, patch_offset));
      uint32_t thunk_offset = adrp_thunk_locations_[processed_adrp_thunks_].second;
      uint32_t adrp_disp = target_offset - (thunk_offset & ~0xfffu);
      uint32_t adrp = PatchAdrp(insn, adrp_disp);

      uint32_t out_disp = thunk_offset - patch_offset;
      DCHECK_EQ(out_disp & 3u, 0u);
      DCHECK((out_disp >> 27) == 0u || (out_disp >> 27) == 31u);  // 28-bit signed.
      insn = (out_disp & 0x0fffffffu) >> shift;
      insn |= 0x14000000;  // B <thunk>

      uint32_t back_disp = -out_disp;
      DCHECK_EQ(back_disp & 3u, 0u);
      DCHECK((back_disp >> 27) == 0u || (back_disp >> 27) == 31u);  // 28-bit signed.
      uint32_t b_back = (back_disp & 0x0fffffffu) >> 2;
      b_back |= 0x14000000;  // B <back>
      size_t thunks_code_offset = current_method_thunks_.size();
      current_method_thunks_.resize(thunks_code_offset + kAdrpThunkSize);
      SetInsn(&current_method_thunks_, thunks_code_offset, adrp);
      SetInsn(&current_method_thunks_, thunks_code_offset + 4u, b_back);
      static_assert(kAdrpThunkSize == 2 * 4u, "thunk has 2 instructions");

      processed_adrp_thunks_ += 1u;
    } else {
      insn = PatchAdrp(insn, disp);
    }
    // Write the new ADRP (or B to the erratum 843419 thunk).
    SetInsn(code, literal_offset, insn);
  } else {
    if ((insn & 0xfffffc00) == 0x91000000) {
      // ADD immediate, 64-bit with imm12 == 0 (unset).
      if (!kEmitCompilerReadBarrier) {
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
      } else {
        // With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
        DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
               patch.GetType() == LinkerPatch::Type::kStringRelative ||
               patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
               patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      }
      shift = 0u;  // No shift for ADD.
    } else {
      // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
      DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
             patch.GetType() == LinkerPatch::Type::kTypeClassTable ||
             patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
             patch.GetType() == LinkerPatch::Type::kStringInternTable ||
             patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
      DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
    }
    if (kIsDebugBuild) {
      uint32_t adrp = GetInsn(code, pc_insn_offset);
      if ((adrp & 0x9f000000u) != 0x90000000u) {
        CHECK(fix_cortex_a53_843419_);
        CHECK_EQ(adrp & 0xfc000000u, 0x14000000u);  // B <thunk>
        CHECK_ALIGNED(current_method_thunks_.size(), kAdrpThunkSize);
        size_t num_thunks = current_method_thunks_.size() / kAdrpThunkSize;
        CHECK_LE(num_thunks, processed_adrp_thunks_);
        uint32_t b_offset = patch_offset - literal_offset + pc_insn_offset;
        for (size_t i = processed_adrp_thunks_ - num_thunks; ; ++i) {
          CHECK_NE(i, processed_adrp_thunks_);
          if (adrp_thunk_locations_[i].first == b_offset) {
            size_t idx = num_thunks - (processed_adrp_thunks_ - i);
            adrp = GetInsn(&current_method_thunks_, idx * kAdrpThunkSize);
            break;
          }
        }
      }
      CHECK_EQ(adrp & 0x9f00001fu,                   // Check that pc_insn_offset points
               0x90000000 | ((insn >> 5) & 0x1fu));  // to ADRP with matching register.
    }
    uint32_t imm12 = (disp & 0xfffu) >> shift;
    insn = (insn & ~(0xfffu << 10)) | (imm12 << 10);
    SetInsn(code, literal_offset, insn);
  }
}
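// Worked example of the ADRP/ADD split above (illustrative numbers): with the ADRP at
// offset 0x1008 and target_offset 0x12345, disp = 0x12345 - 0x1000 = 0x11345. The ADRP
// receives the page part (0x11 pages, i.e. +0x11000 from the ADRP's page) and the ADD
// receives imm12 = 0x345, so the pair computes 0x1000 + 0x11000 + 0x345 = 0x12345.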

void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                       const LinkerPatch& patch,
                                                       uint32_t patch_offset) {
  DCHECK_ALIGNED(patch_offset, 4u);
  uint32_t literal_offset = patch.LiteralOffset();
  DCHECK_ALIGNED(literal_offset, 4u);
  DCHECK_LT(literal_offset, code->size());
  uint32_t insn = GetInsn(code, literal_offset);
  DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000);  // CBNZ Xt, +0 (unpatched)
  ThunkKey key = GetBakerThunkKey(patch);
  if (kIsDebugBuild) {
    const uint32_t encoded_data = key.GetCustomValue1();
    BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
    // Check that the next instruction matches the expected LDR.
    switch (kind) {
      case BakerReadBarrierKind::kField: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (immediate) with correct base_reg.
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffc003e0u, 0xb9400000u | (base_reg << 5));
        break;
      }
      case BakerReadBarrierKind::kArray: {
        DCHECK_GE(code->size() - literal_offset, 8u);
        uint32_t next_insn = GetInsn(code, literal_offset + 4u);
        // LDR (register) with the correct base_reg, size=10 (32-bit), option=011 (extend = LSL),
        // and S=1 (shift amount = 2 for 32-bit version), i.e. LDR Wt, [Xn, Xm, LSL #2].
        CheckValidReg(next_insn & 0x1fu);  // Check destination register.
        const uint32_t base_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(next_insn & 0xffe0ffe0u, 0xb8607800u | (base_reg << 5));
        CheckValidReg((next_insn >> 16) & 0x1f);  // Check index register.
        break;
      }
      case BakerReadBarrierKind::kGcRoot: {
        DCHECK_GE(literal_offset, 4u);
        uint32_t prev_insn = GetInsn(code, literal_offset - 4u);
        // LDR (immediate) with correct root_reg.
        const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
        CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
        break;
      }
      default:
        LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
        UNREACHABLE();
    }
  }
  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
  DCHECK_ALIGNED(target_offset, 4u);
  uint32_t disp = target_offset - patch_offset;
  DCHECK((disp >> 20) == 0u || (disp >> 20) == 4095u);  // 21-bit signed.
  insn |= (disp << (5 - 2)) & 0x00ffffe0u;  // Shift bits 2-20 to 5-23.
  SetInsn(code, literal_offset, insn);
}
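// Illustrative CBNZ patching (plain arithmetic): for disp = +0x40 the imm19 field is
// disp / 4 = 0x10, placed at bits 5-23, so an unpatched CBNZ X0, +0 (0xb5000000) becomes
// 0xb5000000 | (0x40 << 3) = 0xb5000200, i.e. CBNZ X0, +0x40.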

#define __ assembler.GetVIXLAssembler()->

static void EmitGrayCheckAndFastPath(arm64::Arm64Assembler& assembler,
                                     vixl::aarch64::Register base_reg,
                                     vixl::aarch64::MemOperand& lock_word,
                                     vixl::aarch64::Label* slow_path) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  // Load the lock word containing the rb_state.
  __ Ldr(ip0.W(), lock_word);
  // Given the numeric representation, it's enough to check the low bit of the rb_state.
  static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
  __ Tbnz(ip0.W(), LockWord::kReadBarrierStateShift, slow_path);
  static_assert(
      BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET,
      "Field and array LDR offsets must be the same to reuse the same code.");
  // Adjust the return address back to the LDR (1 instruction; 2 for heap poisoning).
  static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
                "Field LDR must be 1 instruction (4B) before the return address label; "
                " 2 instructions (8B) for heap poisoning.");
  __ Add(lr, lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
  // Introduce a dependency on the lock_word including rb_state,
  // to prevent load-load reordering, and without using
  // a memory barrier (which would be more expensive).
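  // (The addend is always zero: ip0 holds the 32-bit lock word zero-extended to 64 bits,
  // so (ip0 LSR 32) == 0 and base_reg is unchanged apart from the data dependency.)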
  __ Add(base_reg, base_reg, Operand(ip0, LSR, 32));
  __ Br(lr);  // And return back to the function.
  // Note: The fake dependency is unnecessary for the slow path.
}

// Load the read barrier introspection entrypoint in register `entrypoint`.
static void LoadReadBarrierMarkIntrospectionEntrypoint(arm64::Arm64Assembler& assembler,
                                                       vixl::aarch64::Register entrypoint) {
  using vixl::aarch64::MemOperand;
  using vixl::aarch64::ip0;
  // Thread Register.
  const vixl::aarch64::Register tr = vixl::aarch64::x19;

  // entrypoint = Thread::Current()->pReadBarrierMarkReg16, i.e. pReadBarrierMarkIntrospection.
  DCHECK_EQ(ip0.GetCode(), 16u);
  const int32_t entry_point_offset =
      Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ip0.GetCode());
  __ Ldr(entrypoint, MemOperand(tr, entry_point_offset));
}

void Arm64RelativePatcher::CompileBakerReadBarrierThunk(arm64::Arm64Assembler& assembler,
                                                        uint32_t encoded_data) {
  using namespace vixl::aarch64;  // NOLINT(build/namespaces)
  BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
  switch (kind) {
    case BakerReadBarrierKind::kField: {
      // Check if the holder is gray and, if not, add fake dependency to the base register
      // and return to the LDR instruction to load the reference. Otherwise, use introspection
      // to load the reference and call the entrypoint (in IP1) that performs further checks
      // on the reference and marks it if needed.
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      auto holder_reg =
          Register::GetXRegFromCode(BakerReadBarrierSecondRegField::Decode(encoded_data));
      CheckValidReg(holder_reg.GetCode());
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      // If base_reg differs from holder_reg, the offset was too large and we must have
      // emitted an explicit null check before the load. Otherwise, we need to null-check
      // the holder as we do not necessarily do that check before going to the thunk.
      vixl::aarch64::Label throw_npe;
      if (holder_reg.Is(base_reg)) {
        __ Cbz(holder_reg.W(), &throw_npe);
      }
      vixl::aarch64::Label slow_path;
      MemOperand lock_word(holder_reg, mirror::Object::MonitorOffset().Int32Value());
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);       // Load the LDR (immediate) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0.W(), ip0.W(), 10, 12);  // Extract the offset.
      __ Ldr(ip0.W(), MemOperand(base_reg, ip0, LSL, 2));  // Load the reference.
      // Do not unpoison. With heap poisoning enabled, the entrypoint expects a poisoned reference.
      __ Br(ip1);                         // Jump to the entrypoint.
      if (holder_reg.Is(base_reg)) {
        // Add null check slow path. The stack map is at the address pointed to by LR.
        __ Bind(&throw_npe);
        int32_t offset = GetThreadOffset<kArm64PointerSize>(kQuickThrowNullPointer).Int32Value();
        __ Ldr(ip0, MemOperand(/* Thread* */ vixl::aarch64::x19, offset));
        __ Br(ip0);
      }
      break;
    }
    case BakerReadBarrierKind::kArray: {
      auto base_reg =
          Register::GetXRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(base_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label slow_path;
      int32_t data_offset =
          mirror::Array::DataOffset(Primitive::ComponentSize(Primitive::kPrimNot)).Int32Value();
      MemOperand lock_word(base_reg, mirror::Object::MonitorOffset().Int32Value() - data_offset);
      DCHECK_LT(lock_word.GetOffset(), 0);
      EmitGrayCheckAndFastPath(assembler, base_reg, lock_word, &slow_path);
      __ Bind(&slow_path);
      MemOperand ldr_address(lr, BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
      __ Ldr(ip0.W(), ldr_address);  // Load the LDR (register) unsigned offset.
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      __ Ubfx(ip0, ip0, 16, 6);      // Extract the index register, plus 32 (bit 21 is set).
      __ Bfi(ip1, ip0, 3, 6);        // Insert ip0 to the entrypoint address to create
                                     // a switch case target based on the index register.
      __ Mov(ip0, base_reg);         // Move the base register to ip0.
      __ Br(ip1);                    // Jump to the entrypoint's array switch case.
      break;
    }
    case BakerReadBarrierKind::kGcRoot: {
      // Check if the reference needs to be marked and if so (i.e. not null, not marked yet
      // and it does not have a forwarding address), call the correct introspection entrypoint;
      // otherwise return the reference (or the extracted forwarding address).
      // There is no gray bit check for GC roots.
      auto root_reg =
          Register::GetWRegFromCode(BakerReadBarrierFirstRegField::Decode(encoded_data));
      CheckValidReg(root_reg.GetCode());
      DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
      UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
      temps.Exclude(ip0, ip1);
      vixl::aarch64::Label return_label, not_marked, forwarding_address;
      __ Cbz(root_reg, &return_label);
      MemOperand lock_word(root_reg.X(), mirror::Object::MonitorOffset().Int32Value());
      __ Ldr(ip0.W(), lock_word);
      __ Tbz(ip0.W(), LockWord::kMarkBitStateShift, &not_marked);
      __ Bind(&return_label);
      __ Br(lr);
      __ Bind(&not_marked);
      __ Tst(ip0.W(), Operand(ip0.W(), LSL, 1));
      __ B(&forwarding_address, mi);
      LoadReadBarrierMarkIntrospectionEntrypoint(assembler, ip1);
      // Adjust the art_quick_read_barrier_mark_introspection address in IP1 to
      // art_quick_read_barrier_mark_introspection_gc_roots.
      __ Add(ip1, ip1, Operand(BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET));
      __ Mov(ip0.W(), root_reg);
      __ Br(ip1);
      __ Bind(&forwarding_address);
      __ Lsl(root_reg, ip0.W(), LockWord::kForwardingAddressShift);
      __ Br(lr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected kind: " << static_cast<uint32_t>(kind);
      UNREACHABLE();
  }
}

std::vector<uint8_t> Arm64RelativePatcher::CompileThunk(const ThunkKey& key) {
  ArenaPool pool;
  ArenaAllocator allocator(&pool);
  arm64::Arm64Assembler assembler(&allocator);

  switch (key.GetType()) {
    case ThunkType::kMethodCall: {
      // The thunk just uses the entry point in the ArtMethod. This works even for calls
      // to the generic JNI and interpreter trampolines.
      Offset offset(ArtMethod::EntryPointFromQuickCompiledCodeOffset(
          kArm64PointerSize).Int32Value());
      assembler.JumpTo(ManagedRegister(arm64::X0), offset, ManagedRegister(arm64::IP0));
      break;
    }
    case ThunkType::kBakerReadBarrier: {
      CompileBakerReadBarrierThunk(assembler, key.GetCustomValue1());
      break;
    }
  }

  // Ensure we emit the literal pool.
  assembler.FinalizeCode();
  std::vector<uint8_t> thunk_code(assembler.CodeSize());
  MemoryRegion code(thunk_code.data(), thunk_code.size());
  assembler.FinalizeInstructions(code);
  return thunk_code;
}
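// Roughly, the method call thunk emitted via JumpTo above amounts to (a sketch; the exact
// sequence is up to Arm64Assembler):
//   LDR ip0, [x0, #entry_point_offset]   // Load the entry point from the ArtMethod* in x0.
//   BR  ip0                              // Tail-call it.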

std::string Arm64RelativePatcher::GetThunkDebugName(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return "MethodCallThunk";

    case ThunkType::kBakerReadBarrier: {
      uint32_t encoded_data = key.GetCustomValue1();
      BakerReadBarrierKind kind = BakerReadBarrierKindField::Decode(encoded_data);
      std::ostringstream oss;
      oss << "BakerReadBarrierThunk";
      switch (kind) {
        case BakerReadBarrierKind::kField:
          oss << "Field_r" << BakerReadBarrierFirstRegField::Decode(encoded_data)
              << "_r" << BakerReadBarrierSecondRegField::Decode(encoded_data);
          break;
        case BakerReadBarrierKind::kArray:
          oss << "Array_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
          DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
          break;
        case BakerReadBarrierKind::kGcRoot:
          oss << "GcRoot_r" << BakerReadBarrierFirstRegField::Decode(encoded_data);
          DCHECK_EQ(kInvalidEncodedReg, BakerReadBarrierSecondRegField::Decode(encoded_data));
          break;
      }
      return oss.str();
    }
  }
}

#undef __

uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallPositiveDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondPositiveDisplacement;
  }
}

uint32_t Arm64RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
  switch (key.GetType()) {
    case ThunkType::kMethodCall:
      return kMaxMethodCallNegativeDisplacement;
    case ThunkType::kBakerReadBarrier:
      return kMaxBcondNegativeDisplacement;
  }
}

uint32_t Arm64RelativePatcher::PatchAdrp(uint32_t adrp, uint32_t disp) {
  return (adrp & 0x9f00001fu) |  // Clear offset bits, keep ADRP with destination reg.
      // Bottom 12 bits are ignored, the next 2 lowest bits are encoded in bits 29-30.
      ((disp & 0x00003000u) << (29 - 12)) |
      // The next 18 bits are encoded in bits 5-22.
      ((disp & 0xffffc000u) >> (12 + 2 - 5)) |
      // Since the target_offset is based on the beginning of the oat file and the
      // image space precedes the oat file, the target_offset into image space will
      // be negative yet passed as uint32_t. Therefore we limit the displacement
      // to +-2GiB (rather than the maximum +-4GiB) and determine the sign bit from
      // the highest bit of the displacement. This is encoded in bit 23.
      ((disp & 0x80000000u) >> (31 - 23));
}
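// Illustrative ADRP patching (plain arithmetic): for disp = 0x00012000 on an unpatched
// ADRP X0 (0x90000000), immlo = (disp >> 12) & 3 = 2 lands in bits 29-30 and
// immhi = disp >> 14 = 4 lands in bits 5-23, giving 0x90000000 | 0x40000000 | (4 << 5)
// = 0xd0000080, i.e. ADRP X0 with a page displacement of +0x12 pages.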

bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
                                                   uint32_t literal_offset,
                                                   uint32_t patch_offset) {
  DCHECK_EQ(patch_offset & 0x3u, 0u);
  if ((patch_offset & 0xff8) == 0xff8) {  // ...ff8 or ...ffc
    uint32_t adrp = GetInsn(code, literal_offset);
    DCHECK_EQ(adrp & 0x9f000000, 0x90000000);
    uint32_t next_offset = patch_offset + 4u;
    uint32_t next_insn = GetInsn(code, literal_offset + 4u);

    // Below we avoid patching sequences where the adrp is followed by a load which can easily
    // be proved to be aligned.

    // First check if the next insn is the LDR using the result of the ADRP.
    // LDR <Wt>, [<Xn>, #pimm], where <Xn> == ADRP destination reg.
    if ((next_insn & 0xffc00000) == 0xb9400000 &&
        (((next_insn >> 5) ^ adrp) & 0x1f) == 0) {
      return false;
    }

    // And since LinkerPatch::Type::k{Method,Type,String}Relative is using the result
    // of the ADRP for an ADD immediate, check for that as well. We generalize a bit
    // to include ADD/ADDS/SUB/SUBS immediate that either uses the ADRP destination
    // or stores the result to a different register.
    if ((next_insn & 0x1f000000) == 0x11000000 &&
        ((((next_insn >> 5) ^ adrp) & 0x1f) == 0 || ((next_insn ^ adrp) & 0x1f) != 0)) {
      return false;
    }

    // LDR <Wt>, <label> is always aligned and thus it doesn't cause boundary crossing.
    if ((next_insn & 0xff000000) == 0x18000000) {
      return false;
    }

    // LDR <Xt>, <label> is aligned iff the pc + displacement is a multiple of 8.
    if ((next_insn & 0xff000000) == 0x58000000) {
      bool is_aligned_load = (((next_offset >> 2) ^ (next_insn >> 5)) & 1) == 0;
      return !is_aligned_load;
    }

    // LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned loads, as SP is
    // guaranteed to be 128-bit aligned and <pimm> is a multiple of the load size.
    if ((next_insn & 0xbfc003e0) == 0xb94003e0) {
      return false;
    }
    return true;
  }
  return false;
}
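// Example of a sequence that needs the thunk (illustrative): an ADRP in the last 8 bytes of
// a 4KiB region, followed by an LDR that uses a different base register:
//   0xff8: ADRP x0, <page>
//   0xffc: LDR  w1, [x2, #16]
// None of the exclusions above applies, so the ADRP gets replaced by a B to a thunk.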

void Arm64RelativePatcher::SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
  DCHECK_LE(offset + 4u, code->size());
  DCHECK_EQ(offset & 3u, 0u);
  uint8_t* addr = &(*code)[offset];
  addr[0] = (value >> 0) & 0xff;
  addr[1] = (value >> 8) & 0xff;
  addr[2] = (value >> 16) & 0xff;
  addr[3] = (value >> 24) & 0xff;
}
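// Instructions are stored little-endian; e.g. SetInsn() writes insn 0x94000040 (BL +0x100)
// as the byte sequence { 0x40, 0x00, 0x00, 0x94 }.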

uint32_t Arm64RelativePatcher::GetInsn(ArrayRef<const uint8_t> code, uint32_t offset) {
  DCHECK_LE(offset + 4u, code.size());
  DCHECK_EQ(offset & 3u, 0u);
  const uint8_t* addr = &code[offset];
  return
      (static_cast<uint32_t>(addr[0]) << 0) +
      (static_cast<uint32_t>(addr[1]) << 8) +
      (static_cast<uint32_t>(addr[2]) << 16) +
      (static_cast<uint32_t>(addr[3]) << 24);
}

template <typename Alloc>
uint32_t Arm64RelativePatcher::GetInsn(std::vector<uint8_t, Alloc>* code, uint32_t offset) {
  return GetInsn(ArrayRef<const uint8_t>(*code), offset);
}

}  // namespace linker
}  // namespace art