/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "linker/relative_patcher_test.h"
#include "linker/arm64/relative_patcher_arm64.h"

namespace art {
namespace linker {

class Arm64RelativePatcherTest : public RelativePatcherTest {
 public:
  explicit Arm64RelativePatcherTest(const std::string& variant)
      : RelativePatcherTest(kArm64, variant) { }

 protected:
  static const uint8_t kCallRawCode[];
  static const ArrayRef<const uint8_t> kCallCode;
  static const uint8_t kNopRawCode[];
  static const ArrayRef<const uint8_t> kNopCode;

  // All branches can be created from kBlPlus0 or kBPlus0 by adding the low 26 bits.
  static constexpr uint32_t kBlPlus0 = 0x94000000u;
  static constexpr uint32_t kBPlus0 = 0x14000000u;
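  // For example, a BL with a +8 byte displacement is kBlPlus0 | ((8u >> 2) & 0x03ffffffu),
  // i.e. 0x94000002u; the low 26 bits hold the signed displacement in 4-byte units.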

  // Special BL values.
  static constexpr uint32_t kBlPlusMax = 0x95ffffffu;
  static constexpr uint32_t kBlMinusMax = 0x96000000u;
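  // These encode the extreme displacements of +0x7fffffc and -0x8000000 bytes, i.e. the
  // bounds of the +-128MiB range reachable through the 26-bit branch immediate.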
41
42 // LDUR x2, [sp, #4], i.e. unaligned load crossing 64-bit boundary (assuming aligned sp).
43 static constexpr uint32_t kLdurInsn = 0xf840405fu;
44
45 // LDR w12, <label> and LDR x12, <label>. Bits 5-23 contain label displacement in 4-byte units.
46 static constexpr uint32_t kLdrWPcRelInsn = 0x1800000cu;
47 static constexpr uint32_t kLdrXPcRelInsn = 0x5800000cu;
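  // For example, kLdrWPcRelInsn | (2u << 5) encodes LDR w12, <label> with the label +8 bytes
  // ahead of the load (imm19 == 2, scaled by 4).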
48
49 // LDR w13, [SP, #<pimm>] and LDR x13, [SP, #<pimm>]. Bits 10-21 contain displacement from SP
50 // in units of 4-bytes (for 32-bit load) or 8-bytes (for 64-bit load).
51 static constexpr uint32_t kLdrWSpRelInsn = 0xb94003edu;
52 static constexpr uint32_t kLdrXSpRelInsn = 0xf94003edu;
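  // For example, kLdrWSpRelInsn | (1u << 10) encodes LDR w13, [sp, #4] (imm12 == 1, scaled by 4).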

  uint32_t Create2MethodsWithGap(const ArrayRef<const uint8_t>& method1_code,
                                 const ArrayRef<const LinkerPatch>& method1_patches,
                                 const ArrayRef<const uint8_t>& last_method_code,
                                 const ArrayRef<const LinkerPatch>& last_method_patches,
                                 uint32_t distance_without_thunks) {
    CHECK_EQ(distance_without_thunks % kArm64Alignment, 0u);
    const uint32_t method1_offset =
        CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
    AddCompiledMethod(MethodRef(1u), method1_code, method1_patches);
    const uint32_t gap_start =
        CompiledCode::AlignCode(method1_offset + method1_code.size(), kArm64);

    // We want to put the last method at a very precise offset.
    const uint32_t last_method_offset = method1_offset + distance_without_thunks;
    const uint32_t gap_end = last_method_offset - sizeof(OatQuickMethodHeader);
    CHECK(IsAligned<kArm64Alignment>(gap_end));

    // Fill the gap with intermediate methods in chunks of 2MiB and the last in [2MiB, 4MiB).
    // (This allows deduplicating the small chunks, so this test does not need 256MiB of
    // memory to cover +-128MiB offsets.)
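    // For example, a distance of exactly 128MiB yields 62 small chunk methods (indexes 2-63),
    // one last chunk method (index 64) and a returned last method index of 65; the
    // expected_last_method_idx == 65 in the tests below relies on this.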
    uint32_t method_idx = 2u;
    constexpr uint32_t kSmallChunkSize = 2 * MB;
    std::vector<uint8_t> gap_code;
    size_t gap_size = gap_end - gap_start;
    for (; gap_size >= 2u * kSmallChunkSize; gap_size -= kSmallChunkSize) {
      uint32_t chunk_code_size = kSmallChunkSize - sizeof(OatQuickMethodHeader);
      gap_code.resize(chunk_code_size, 0u);
      AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
                        ArrayRef<const LinkerPatch>());
      method_idx += 1u;
    }
    uint32_t chunk_code_size = gap_size - sizeof(OatQuickMethodHeader);
    gap_code.resize(chunk_code_size, 0u);
    AddCompiledMethod(MethodRef(method_idx), ArrayRef<const uint8_t>(gap_code),
                      ArrayRef<const LinkerPatch>());
    method_idx += 1u;

    // Add the last method and link.
    AddCompiledMethod(MethodRef(method_idx), last_method_code, last_method_patches);
    Link();

    // Check assumptions.
    CHECK_EQ(GetMethodOffset(1), method1_offset);
    auto last_result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(last_result.first);
    // There may be a thunk before the last method.
    if (last_result.second != last_method_offset) {
      // Thunk present. Check that there's only one.
      uint32_t aligned_thunk_size = CompiledCode::AlignCode(ThunkSize(), kArm64);
      CHECK_EQ(last_result.second, last_method_offset + aligned_thunk_size);
    }
    return method_idx;
  }

  uint32_t GetMethodOffset(uint32_t method_idx) {
    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
    CHECK(result.first);
    CHECK_EQ(result.second & 3u, 0u);
    return result.second;
  }

  uint32_t ThunkSize() {
    return static_cast<Arm64RelativePatcher*>(patcher_.get())->thunk_code_.size();
  }

  bool CheckThunk(uint32_t thunk_offset) {
    Arm64RelativePatcher* patcher = static_cast<Arm64RelativePatcher*>(patcher_.get());
    ArrayRef<const uint8_t> expected_code(patcher->thunk_code_);
    if (output_.size() < thunk_offset + expected_code.size()) {
      LOG(ERROR) << "output_.size() == " << output_.size() << " < "
          << "thunk_offset + expected_code.size() == " << (thunk_offset + expected_code.size());
      return false;
    }
    ArrayRef<const uint8_t> linked_code(&output_[thunk_offset], expected_code.size());
    if (linked_code == expected_code) {
      return true;
    }
    // Log failure info.
    DumpDiff(expected_code, linked_code);
    return false;
  }

  std::vector<uint8_t> GenNopsAndBl(size_t num_nops, uint32_t bl) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 4u);
    for (size_t i = 0; i != num_nops; ++i) {
      result.insert(result.end(), kNopCode.begin(), kNopCode.end());
    }
    result.push_back(static_cast<uint8_t>(bl));
    result.push_back(static_cast<uint8_t>(bl >> 8));
    result.push_back(static_cast<uint8_t>(bl >> 16));
    result.push_back(static_cast<uint8_t>(bl >> 24));
    return result;
  }

  std::vector<uint8_t> GenNopsAndAdrpLdr(size_t num_nops,
                                         uint32_t method_offset, uint32_t target_offset) {
    std::vector<uint8_t> result;
    result.reserve(num_nops * 4u + 8u);
    for (size_t i = 0; i != num_nops; ++i) {
      result.insert(result.end(), kNopCode.begin(), kNopCode.end());
    }
    DCHECK_EQ(method_offset & 3u, 0u);
    DCHECK_EQ(target_offset & 3u, 0u);
    uint32_t adrp_offset = method_offset + num_nops * 4u;
    uint32_t disp = target_offset - (adrp_offset & ~0xfffu);
    DCHECK_EQ(disp & 3u, 0u);
    uint32_t ldr = 0xb9400001 |               // LDR w1, [x0, #(imm12 << 2)]
        ((disp & 0xfffu) << (10 - 2));        // imm12 = ((disp & 0xfffu) >> 2) is at bit 10.
    uint32_t adrp = 0x90000000 |              // ADRP x0, +SignExtend(immhi:immlo:Zeros(12), 64)
        ((disp & 0x3000u) << (29 - 12)) |     // immlo = ((disp & 0x3000u) >> 12) is at bit 29,
        ((disp & 0xffffc000) >> (14 - 5)) |   // immhi = (disp >> 14) is at bit 5,
        // We take the sign bit from the disp, limiting disp to +-2GiB.
        ((disp & 0x80000000) >> (31 - 23));   // sign bit in immhi is at bit 23.
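    // For example, disp == 0x1000u (target at the start of the next 4KiB page) yields
    // adrp == 0xb0000000u, i.e. ADRP x0, #+0x1000 (immlo == 1, immhi == 0), and leaves
    // ldr == 0xb9400001u, i.e. LDR w1, [x0], since the low 12 bits of disp are zero.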
    result.push_back(static_cast<uint8_t>(adrp));
    result.push_back(static_cast<uint8_t>(adrp >> 8));
    result.push_back(static_cast<uint8_t>(adrp >> 16));
    result.push_back(static_cast<uint8_t>(adrp >> 24));
    result.push_back(static_cast<uint8_t>(ldr));
    result.push_back(static_cast<uint8_t>(ldr >> 8));
    result.push_back(static_cast<uint8_t>(ldr >> 16));
    result.push_back(static_cast<uint8_t>(ldr >> 24));
    return result;
  }

  void TestNopsAdrpLdr(size_t num_nops, uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    dex_cache_arrays_begin_ = dex_cache_arrays_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    LinkerPatch patches[] = {
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 4u, nullptr, num_nops * 4u, element_offset),
    };
    AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();

    uint32_t method1_offset = GetMethodOffset(1u);
    uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
    auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void InsertInsn(std::vector<uint8_t>* code, size_t pos, uint32_t insn) {
    CHECK_LE(pos, code->size());
    const uint8_t insn_code[] = {
        static_cast<uint8_t>(insn), static_cast<uint8_t>(insn >> 8),
        static_cast<uint8_t>(insn >> 16), static_cast<uint8_t>(insn >> 24),
    };
    static_assert(sizeof(insn_code) == 4u, "Invalid sizeof(insn_code).");
    code->insert(code->begin() + pos, insn_code, insn_code + sizeof(insn_code));
  }

  void PrepareNopsAdrpInsn2Ldr(size_t num_nops, uint32_t insn2,
                               uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    dex_cache_arrays_begin_ = dex_cache_arrays_begin;
    auto code = GenNopsAndAdrpLdr(num_nops, 0u, 0u);  // Unpatched.
    InsertInsn(&code, num_nops * 4u + 4u, insn2);
    LinkerPatch patches[] = {
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u     , nullptr, num_nops * 4u, element_offset),
        LinkerPatch::DexCacheArrayPatch(num_nops * 4u + 8u, nullptr, num_nops * 4u, element_offset),
    };
    AddCompiledMethod(MethodRef(1u), ArrayRef<const uint8_t>(code),
                      ArrayRef<const LinkerPatch>(patches));
    Link();
  }

  void TestNopsAdrpInsn2Ldr(size_t num_nops, uint32_t insn2,
                            uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset);

    uint32_t method1_offset = GetMethodOffset(1u);
    uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
    auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  }

  void TestNopsAdrpInsn2LdrHasThunk(size_t num_nops, uint32_t insn2,
                                    uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    PrepareNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset);

    uint32_t method1_offset = GetMethodOffset(1u);
    CHECK(!compiled_method_refs_.empty());
    CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
    CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
    uint32_t method1_size = compiled_methods_[0]->GetQuickCode()->size();
    uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
    uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
    ASSERT_EQ(b_diff & 3u, 0u);
    ASSERT_LT(b_diff, 128 * MB);
    uint32_t b_out = kBPlus0 + ((b_diff >> 2) & 0x03ffffffu);
    uint32_t b_in = kBPlus0 + ((-b_diff >> 2) & 0x03ffffffu);
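    // Expected output: the ADRP in the method is replaced by b_out, branching forward to
    // the thunk; the thunk performs the ADRP and b_in branches back to the instruction
    // that follows the replaced ADRP.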

    uint32_t target_offset = dex_cache_arrays_begin_ + element_offset;
    auto expected_code = GenNopsAndAdrpLdr(num_nops, method1_offset, target_offset);
    InsertInsn(&expected_code, num_nops * 4u + 4u, insn2);
    // Replace the ADRP with a B to the thunk.
    expected_code.erase(expected_code.begin() + num_nops * 4u,
                        expected_code.begin() + num_nops * 4u + 4u);
    InsertInsn(&expected_code, num_nops * 4u, b_out);
    EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));

    auto expected_thunk_code = GenNopsAndAdrpLdr(0u, thunk_offset, target_offset);
    ASSERT_EQ(expected_thunk_code.size(), 8u);
    expected_thunk_code.erase(expected_thunk_code.begin() + 4u, expected_thunk_code.begin() + 8u);
    InsertInsn(&expected_thunk_code, 4u, b_in);
    ASSERT_EQ(expected_thunk_code.size(), 8u);

    uint32_t thunk_size = ThunkSize();
    ASSERT_EQ(thunk_offset + thunk_size, output_.size());
    ASSERT_EQ(thunk_size, expected_thunk_code.size());
    ArrayRef<const uint8_t> thunk_code(&output_[thunk_offset], thunk_size);
    if (ArrayRef<const uint8_t>(expected_thunk_code) != thunk_code) {
      DumpDiff(ArrayRef<const uint8_t>(expected_thunk_code), thunk_code);
      FAIL();
    }
  }

  void TestAdrpInsn2Ldr(uint32_t insn2, uint32_t adrp_offset, bool has_thunk,
                        uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    uint32_t method1_offset =
        CompiledCode::AlignCode(kTrampolineSize, kArm64) + sizeof(OatQuickMethodHeader);
    ASSERT_LT(method1_offset, adrp_offset);
    ASSERT_EQ(adrp_offset & 3u, 0u);
    uint32_t num_nops = (adrp_offset - method1_offset) / 4u;
    if (has_thunk) {
      TestNopsAdrpInsn2LdrHasThunk(num_nops, insn2, dex_cache_arrays_begin, element_offset);
    } else {
      TestNopsAdrpInsn2Ldr(num_nops, insn2, dex_cache_arrays_begin, element_offset);
    }
    ASSERT_EQ(method1_offset, GetMethodOffset(1u));  // If this fails, num_nops is wrong.
  }

  void TestAdrpLdurLdr(uint32_t adrp_offset, bool has_thunk,
                       uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    TestAdrpInsn2Ldr(kLdurInsn, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }

  void TestAdrpLdrPcRelLdr(uint32_t pcrel_ldr_insn, int32_t pcrel_disp,
                           uint32_t adrp_offset, bool has_thunk,
                           uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    ASSERT_LT(pcrel_disp, 0x100000);
    ASSERT_GE(pcrel_disp, -0x100000);
    ASSERT_EQ(pcrel_disp & 0x3, 0);
    uint32_t insn2 = pcrel_ldr_insn | (((static_cast<uint32_t>(pcrel_disp) >> 2) & 0x7ffffu) << 5);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }

  void TestAdrpLdrSpRelLdr(uint32_t sprel_ldr_insn, uint32_t sprel_disp_in_load_units,
                           uint32_t adrp_offset, bool has_thunk,
                           uint32_t dex_cache_arrays_begin, uint32_t element_offset) {
    ASSERT_LT(sprel_disp_in_load_units, 0x1000u);
    uint32_t insn2 = sprel_ldr_insn | ((sprel_disp_in_load_units & 0xfffu) << 10);
    TestAdrpInsn2Ldr(insn2, adrp_offset, has_thunk, dex_cache_arrays_begin, element_offset);
  }
};

const uint8_t Arm64RelativePatcherTest::kCallRawCode[] = {
    0x00, 0x00, 0x00, 0x94
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kCallCode(kCallRawCode);

const uint8_t Arm64RelativePatcherTest::kNopRawCode[] = {
    0x1f, 0x20, 0x03, 0xd5
};

const ArrayRef<const uint8_t> Arm64RelativePatcherTest::kNopCode(kNopRawCode);

class Arm64RelativePatcherTestDefault : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDefault() : Arm64RelativePatcherTest("default") { }
};

class Arm64RelativePatcherTestDenver64 : public Arm64RelativePatcherTest {
 public:
  Arm64RelativePatcherTestDenver64() : Arm64RelativePatcherTest("denver64") { }
};

TEST_F(Arm64RelativePatcherTestDefault, CallSelf) {
  LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  static const uint8_t expected_code[] = {
      0x00, 0x00, 0x00, 0x94
  };
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOther) {
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(method1_patches));
  LinkerPatch method2_patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 1u),
  };
  AddCompiledMethod(MethodRef(2u), kCallCode, ArrayRef<const LinkerPatch>(method2_patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t method2_offset = GetMethodOffset(2u);
  uint32_t diff_after = method2_offset - method1_offset;
  ASSERT_EQ(diff_after & 3u, 0u);
  ASSERT_LT(diff_after >> 2, 1u << 8);  // Simple encoding, (diff_after >> 2) fits into 8 bits.
  static const uint8_t method1_expected_code[] = {
      static_cast<uint8_t>(diff_after >> 2), 0x00, 0x00, 0x94
  };
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(method1_expected_code)));
  uint32_t diff_before = method1_offset - method2_offset;
  ASSERT_EQ(diff_before & 3u, 0u);
  ASSERT_GE(diff_before, -1u << 27);
  auto method2_expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff_before >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(2u), ArrayRef<const uint8_t>(method2_expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallTrampoline) {
  LinkerPatch patches[] = {
      LinkerPatch::RelativeCodePatch(0u, nullptr, 2u),
  };
  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
  Link();

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t diff = kTrampolineOffset - method1_offset;
  ASSERT_EQ(diff & 1u, 0u);
  ASSERT_GE(diff, -1u << 9);  // Simple encoding, -256 <= (diff >> 1) < 0 (checked as unsigned).
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff >> 2) & 0x03ffffffu));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t max_positive_disp = 128 * MB - 4u;
  uint32_t last_method_idx = Create2MethodsWithGap(method1_code, method1_patches,
                                                   kNopCode, ArrayRef<const LinkerPatch>(),
                                                   bl_offset_in_method1 + max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset + bl_offset_in_method1 + max_positive_disp, last_method_offset);

  // Check linked code.
  auto expected_code = GenNopsAndBl(1u, kBlPlusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherAlmostTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t max_negative_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(kNopCode, ArrayRef<const LinkerPatch>(),
                                                   last_method_code, last_method_patches,
                                                   max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset, last_method_offset + bl_offset_in_last_method - max_negative_disp);

  // Check linked code.
  auto expected_code = GenNopsAndBl(0u, kBlMinusMax);
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
  auto method1_raw_code = GenNopsAndBl(0u, kBlPlus0);
  constexpr uint32_t bl_offset_in_method1 = 0u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> method1_code(method1_raw_code);
  ASSERT_EQ(bl_offset_in_method1 + 4u, method1_code.size());
  uint32_t expected_last_method_idx = 65;  // Based on 2MiB chunks in Create2MethodsWithGap().
  LinkerPatch method1_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_method1, nullptr, expected_last_method_idx),
  };

  constexpr uint32_t just_over_max_positive_disp = 128 * MB;
  uint32_t last_method_idx = Create2MethodsWithGap(
      method1_code, method1_patches, kNopCode, ArrayRef<const LinkerPatch>(),
      bl_offset_in_method1 + just_over_max_positive_disp);
  ASSERT_EQ(expected_last_method_idx, last_method_idx);

  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
  ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_header_offset));
  uint32_t thunk_offset = last_method_header_offset - CompiledCode::AlignCode(ThunkSize(), kArm64);
  ASSERT_TRUE(IsAligned<kArm64Alignment>(thunk_offset));
  uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
  ASSERT_EQ(diff & 3u, 0u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
  CheckThunk(thunk_offset);
}

TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarBefore) {
  auto last_method_raw_code = GenNopsAndBl(1u, kBlPlus0);
  constexpr uint32_t bl_offset_in_last_method = 1u * 4u;  // After NOPs.
  ArrayRef<const uint8_t> last_method_code(last_method_raw_code);
  ASSERT_EQ(bl_offset_in_last_method + 4u, last_method_code.size());
  LinkerPatch last_method_patches[] = {
      LinkerPatch::RelativeCodePatch(bl_offset_in_last_method, nullptr, 1u),
  };

  constexpr uint32_t just_over_max_negative_disp = 128 * MB + 4;
  uint32_t last_method_idx = Create2MethodsWithGap(
      kNopCode, ArrayRef<const LinkerPatch>(), last_method_code, last_method_patches,
      just_over_max_negative_disp - bl_offset_in_last_method);
  uint32_t method1_offset = GetMethodOffset(1u);
  uint32_t last_method_offset = GetMethodOffset(last_method_idx);
  ASSERT_EQ(method1_offset,
            last_method_offset + bl_offset_in_last_method - just_over_max_negative_disp);

  // Check linked code.
  uint32_t thunk_offset =
      CompiledCode::AlignCode(last_method_offset + last_method_code.size(), kArm64);
  uint32_t diff = thunk_offset - (last_method_offset + bl_offset_in_last_method);
  ASSERT_EQ(diff & 3u, 0u);
  ASSERT_LT(diff, 128 * MB);
  auto expected_code = GenNopsAndBl(1u, kBlPlus0 | (diff >> 2));
  EXPECT_TRUE(CheckLinkedMethod(MethodRef(last_method_idx),
                                ArrayRef<const uint8_t>(expected_code)));
  EXPECT_TRUE(CheckThunk(thunk_offset));
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference1) {
  TestNopsAdrpLdr(0u, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference2) {
  TestNopsAdrpLdr(0u, -0x12345678u, 0x4444u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference3) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x3ffcu);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference4) {
  TestNopsAdrpLdr(0u, 0x12345000u, 0x4000u);
}

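// The following tests exercise the ADRP fixup: an ADRP at offset 0xff8 or 0xffc within its
// 4KiB page, followed by a load that trips the workaround, must be routed through a thunk on
// the "default" variant (presumably the Cortex-A53 erratum 843419 workaround), while the
// "denver64" variant needs no fixup.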
TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xff4) {
  TestAdrpLdurLdr(0xff4u, false, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xff8) {
  TestAdrpLdurLdr(0xff8u, true, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0xffc) {
  TestAdrpLdurLdr(0xffcu, true, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference0x1000) {
  TestAdrpLdurLdr(0x1000u, false, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xff4) {
  TestAdrpLdurLdr(0xff4u, false, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xff8) {
  TestAdrpLdurLdr(0xff8u, false, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0xffc) {
  TestAdrpLdurLdr(0xffcu, false, 0x12345678u, 0x1234u);
}

TEST_F(Arm64RelativePatcherTestDenver64, DexCacheReference0x1000) {
  TestAdrpLdurLdr(0x1000u, false, 0x12345678u, 0x1234u);
}

#define TEST_FOR_OFFSETS(test, disp1, disp2) \
  test(0xff4u, disp1) test(0xff8u, disp1) test(0xffcu, disp1) test(0x1000u, disp1) \
  test(0xff4u, disp2) test(0xff8u, disp2) test(0xffcu, disp2) test(0x1000u, disp2)
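// Each TEST_FOR_OFFSETS use below instantiates eight tests: the four ADRP offsets around a
// 4KiB page boundary times the two given displacements.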

// LDR <Wt>, <label> is always aligned. We should never have to use a fixup.
#define LDRW_PCREL_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WPcRel ## disp) { \
    TestAdrpLdrPcRelLdr(kLdrWPcRelInsn, disp, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_PCREL_TEST, 0x1234, 0x1238)

// LDR <Xt>, <label> is aligned when offset + displacement is a multiple of 8.
#define LDRX_PCREL_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XPcRel ## disp) { \
    bool unaligned = ((adrp_offset + 4u + static_cast<uint32_t>(disp)) & 7u) != 0; \
    bool has_thunk = (adrp_offset == 0xff8u || adrp_offset == 0xffcu) && unaligned; \
    TestAdrpLdrPcRelLdr(kLdrXPcRelInsn, disp, adrp_offset, has_thunk, 0x12345678u, 0x1234u); \
  }

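// For example, with adrp_offset == 0xff8u the load at 0xffc targets 0xffc + 0x1234 == 0x2230,
// which is 8-byte aligned (no thunk), while 0xffc + 0x1238 == 0x2234 is not (thunk needed).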
TEST_FOR_OFFSETS(LDRX_PCREL_TEST, 0x1234, 0x1238)

// LDR <Wt>, [SP, #<pimm>] and LDR <Xt>, [SP, #<pimm>] are always aligned. No fixup needed.
#define LDRW_SPREL_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## WSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrWSpRelInsn, disp >> 2, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRW_SPREL_TEST, 0, 4)

#define LDRX_SPREL_TEST(adrp_offset, disp) \
  TEST_F(Arm64RelativePatcherTestDefault, DexCacheReference ## adrp_offset ## XSpRel ## disp) { \
    TestAdrpLdrSpRelLdr(kLdrXSpRelInsn, disp >> 3, adrp_offset, false, 0x12345678u, 0x1234u); \
  }

TEST_FOR_OFFSETS(LDRX_SPREL_TEST, 0, 8)

}  // namespace linker
}  // namespace art