/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc_map_builder.h"
#include "mapping_table.h"
#include "mir_to_lir-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "vmap_table.h"

namespace art {

namespace {

/* Dump a mapping table */
template <typename It>
void DumpMappingTable(const char* table_name, const char* descriptor, const char* name,
                      const Signature& signature, uint32_t size, It first) {
  if (size != 0) {
    std::string line(StringPrintf("\n %s %s%s_%s_table[%u] = {", table_name,
                                  descriptor, name, signature.ToString().c_str(), size));
    std::replace(line.begin(), line.end(), ';', '_');
    LOG(INFO) << line;
    for (uint32_t i = 0; i != size; ++i) {
      line = StringPrintf(" {0x%05x, 0x%04x},", first.NativePcOffset(), first.DexPc());
      ++first;
      LOG(INFO) << line;
    }
    LOG(INFO) << " };\n\n";
  }
}

}  // anonymous namespace

bool Mir2Lir::IsInexpensiveConstant(RegLocation rl_src) {
  bool res = false;
  if (rl_src.is_const) {
    if (rl_src.wide) {
      // For wide registers, check whether we're the high partner. In that case we need to switch
      // to the lower one for the correct value.
      if (rl_src.high_word) {
        rl_src.high_word = false;
        rl_src.s_reg_low--;
        rl_src.orig_sreg--;
      }
      if (rl_src.fp) {
        res = InexpensiveConstantDouble(mir_graph_->ConstantValueWide(rl_src));
      } else {
        res = InexpensiveConstantLong(mir_graph_->ConstantValueWide(rl_src));
      }
    } else {
      if (rl_src.fp) {
        res = InexpensiveConstantFloat(mir_graph_->ConstantValue(rl_src));
      } else {
        res = InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src));
      }
    }
  }
  return res;
}

void Mir2Lir::MarkSafepointPC(LIR* inst) {
  DCHECK(!inst->flags.use_def_invalid);
  inst->u.m.def_mask = &kEncodeAll;
  LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}

void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
  DCHECK(!after->flags.use_def_invalid);
  after->u.m.def_mask = &kEncodeAll;
  // As NewLIR0 uses Append, we need to create the LIR by hand.
  LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
  if (after->next == nullptr) {
    DCHECK_EQ(after, last_lir_insn_);
    AppendLIR(safepoint_pc);
  } else {
    InsertLIRAfter(after, safepoint_pc);
  }
  DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
}

/* Remove a LIR from the list. */
void Mir2Lir::UnlinkLIR(LIR* lir) {
  if (UNLIKELY(lir == first_lir_insn_)) {
    first_lir_insn_ = lir->next;
    if (lir->next != NULL) {
      lir->next->prev = NULL;
    } else {
      DCHECK(lir->next == NULL);
      DCHECK(lir == last_lir_insn_);
      last_lir_insn_ = NULL;
    }
  } else if (lir == last_lir_insn_) {
    last_lir_insn_ = lir->prev;
    lir->prev->next = NULL;
  } else if ((lir->prev != NULL) && (lir->next != NULL)) {
    lir->prev->next = lir->next;
    lir->next->prev = lir->prev;
  }
}

/* Convert an instruction to a NOP */
void Mir2Lir::NopLIR(LIR* lir) {
  lir->flags.is_nop = true;
  if (!cu_->verbose) {
    UnlinkLIR(lir);
  }
}

void Mir2Lir::SetMemRefType(LIR* lir, bool is_load, int mem_type) {
  DCHECK(GetTargetInstFlags(lir->opcode) & (IS_LOAD | IS_STORE));
  DCHECK(!lir->flags.use_def_invalid);
  // TODO: Avoid the extra Arena allocation!
  const ResourceMask** mask_ptr;
  ResourceMask mask;
  if (is_load) {
    mask_ptr = &lir->u.m.use_mask;
  } else {
    mask_ptr = &lir->u.m.def_mask;
  }
  mask = **mask_ptr;
  /* Clear out the memref flags */
  mask.ClearBits(kEncodeMem);
  /* ..and then add back the one we need */
  switch (mem_type) {
    case ResourceMask::kLiteral:
      DCHECK(is_load);
      mask.SetBit(ResourceMask::kLiteral);
      break;
    case ResourceMask::kDalvikReg:
      mask.SetBit(ResourceMask::kDalvikReg);
      break;
    case ResourceMask::kHeapRef:
      mask.SetBit(ResourceMask::kHeapRef);
      break;
    case ResourceMask::kMustNotAlias:
      /* Currently only loads can be marked as kMustNotAlias */
      DCHECK(!(GetTargetInstFlags(lir->opcode) & IS_STORE));
      mask.SetBit(ResourceMask::kMustNotAlias);
      break;
    default:
      LOG(FATAL) << "Oat: invalid memref kind - " << mem_type;
  }
  *mask_ptr = mask_cache_.GetMask(mask);
}

/*
 * Mark load/store instructions that access Dalvik registers through the stack.
 */
void Mir2Lir::AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load,
                                      bool is64bit) {
  DCHECK((is_load ? lir->u.m.use_mask : lir->u.m.def_mask)->Intersection(kEncodeMem).Equals(
      kEncodeDalvikReg));

  /*
   * Store the Dalvik register id in alias_info. Mark the MSB if it is a 64-bit
   * access.
   */
  lir->flags.alias_info = ENCODE_ALIAS_INFO(reg_id, is64bit);
}

/*
 * Debugging macros
 */
#define DUMP_RESOURCE_MASK(X)

/* Pretty-print a LIR instruction */
void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
  int offset = lir->offset;
  int dest = lir->operands[0];
  const bool dump_nop = (cu_->enable_debug & (1 << kDebugShowNops));

  /* Handle pseudo-ops individually, and all regular insns as a group */
  switch (lir->opcode) {
    case kPseudoMethodEntry:
      LOG(INFO) << "-------- method entry "
                << PrettyMethod(cu_->method_idx, *cu_->dex_file);
      break;
    case kPseudoMethodExit:
      LOG(INFO) << "-------- Method_Exit";
      break;
    case kPseudoBarrier:
      LOG(INFO) << "-------- BARRIER";
      break;
    case kPseudoEntryBlock:
      LOG(INFO) << "-------- entry offset: 0x" << std::hex << dest;
      break;
    case kPseudoDalvikByteCodeBoundary:
      if (lir->operands[0] == 0) {
        // NOTE: only used for debug listings.
        lir->operands[0] = WrapPointer(ArenaStrdup("No instruction string"));
      }
      LOG(INFO) << "-------- dalvik offset: 0x" << std::hex
                << lir->dalvik_offset << " @ "
                << reinterpret_cast<char*>(UnwrapPointer(lir->operands[0]));
      break;
    case kPseudoExitBlock:
      LOG(INFO) << "-------- exit offset: 0x" << std::hex << dest;
      break;
    case kPseudoPseudoAlign4:
      LOG(INFO) << reinterpret_cast<uintptr_t>(base_addr) + offset << " (0x" << std::hex
                << offset << "): .align4";
      break;
    case kPseudoEHBlockLabel:
      LOG(INFO) << "Exception_Handling:";
      break;
    case kPseudoTargetLabel:
    case kPseudoNormalBlockLabel:
      LOG(INFO) << "L" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoThrowTarget:
      LOG(INFO) << "LT" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoIntrinsicRetry:
      LOG(INFO) << "IR" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSuspendTarget:
      LOG(INFO) << "LS" << reinterpret_cast<void*>(lir) << ":";
      break;
    case kPseudoSafepointPC:
      LOG(INFO) << "LsafepointPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoExportedPC:
      LOG(INFO) << "LexportedPC_0x" << std::hex << lir->offset << "_" << lir->dalvik_offset << ":";
      break;
    case kPseudoCaseLabel:
      LOG(INFO) << "LC" << reinterpret_cast<void*>(lir) << ": Case target 0x"
                << std::hex << lir->operands[0] << "|" << std::dec <<
          lir->operands[0];
      break;
    default:
      if (lir->flags.is_nop && !dump_nop) {
        break;
      } else {
        std::string op_name(BuildInsnString(GetTargetInstName(lir->opcode),
                                            lir, base_addr));
        std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
                                                lir, base_addr));
        LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
                                  base_addr + offset,
                                  op_name.c_str(), op_operands.c_str(),
                                  lir->flags.is_nop ? "(nop)" : "");
      }
      break;
  }

  if (lir->u.m.use_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.use_mask, "use"));
  }
  if (lir->u.m.def_mask && (!lir->flags.is_nop || dump_nop)) {
    DUMP_RESOURCE_MASK(DumpResourceMask(lir, *lir->u.m.def_mask, "def"));
  }
}

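// Log, for each Dalvik register and compiler temp, whether it was promoted to a
// core/FP physical register or left in its stack home.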
void Mir2Lir::DumpPromotionMap() {
  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
  for (int i = 0; i < num_regs; i++) {
    PromotionMap v_reg_map = promotion_map_[i];
    std::string buf;
    if (v_reg_map.fp_location == kLocPhysReg) {
      StringAppendF(&buf, " : s%d", RegStorage::RegNum(v_reg_map.fp_reg));
    }

    std::string buf3;
    if (i < cu_->num_dalvik_registers) {
      StringAppendF(&buf3, "%02d", i);
    } else if (i == mir_graph_->GetMethodSReg()) {
      buf3 = "Method*";
    } else {
      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
    }

    LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
                              v_reg_map.core_location == kLocPhysReg ?
                              "r" : "SP+", v_reg_map.core_location == kLocPhysReg ?
                              v_reg_map.core_reg : SRegOffset(i),
                              buf.c_str());
  }
}

void Mir2Lir::UpdateLIROffsets() {
  // Only used for code listings.
  size_t offset = 0;
  for (LIR* lir = first_lir_insn_; lir != nullptr; lir = lir->next) {
    lir->offset = offset;
    if (!lir->flags.is_nop && !IsPseudoLirOp(lir->opcode)) {
      offset += GetInsnSize(lir);
    } else if (lir->opcode == kPseudoPseudoAlign4) {
      offset += (offset & 0x2);
    }
  }
}

/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
  LOG(INFO) << "Dumping LIR insns for "
            << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  LIR* lir_insn;
  int insns_size = cu_->code_item->insns_size_in_code_units_;

  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
  LOG(INFO) << "Ins : " << cu_->num_ins;
  LOG(INFO) << "Outs : " << cu_->num_outs;
  LOG(INFO) << "CoreSpills : " << num_core_spills_;
  LOG(INFO) << "FPSpills : " << num_fp_spills_;
  LOG(INFO) << "CompilerTemps : " << mir_graph_->GetNumUsedCompilerTemps();
  LOG(INFO) << "Frame size : " << frame_size_;
  LOG(INFO) << "code size is " << total_size_ <<
      " bytes, Dalvik size is " << insns_size * 2;
  LOG(INFO) << "expansion factor: "
            << static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
  DumpPromotionMap();
  UpdateLIROffsets();
  for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
    DumpLIRInsn(lir_insn, 0);
  }
  for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
    LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
                              lir_insn->operands[0]);
  }

  const DexFile::MethodId& method_id =
      cu_->dex_file->GetMethodId(cu_->method_idx);
  const Signature signature = cu_->dex_file->GetMethodSignature(method_id);
  const char* name = cu_->dex_file->GetMethodName(method_id);
  const char* descriptor(cu_->dex_file->GetMethodDeclaringClassDescriptor(method_id));

  // Dump mapping tables
  if (!encoded_mapping_table_.empty()) {
    MappingTable table(&encoded_mapping_table_[0]);
    DumpMappingTable("PC2Dex_MappingTable", descriptor, name, signature,
                     table.PcToDexSize(), table.PcToDexBegin());
    DumpMappingTable("Dex2PC_MappingTable", descriptor, name, signature,
                     table.DexToPcSize(), table.DexToPcBegin());
  }
}

/*
 * Search the existing constants in the literal pool for an exact or close match
 * within the specified delta (which must be greater than or equal to 0).
 */
LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
  while (data_target) {
    if ((static_cast<unsigned>(value - data_target->operands[0])) <= delta)
      return data_target;
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
  bool lo_match = false;
  LIR* lo_target = NULL;
  while (data_target) {
    if (lo_match && (data_target->operands[0] == val_hi)) {
      // Record high word in case we need to expand this later.
      lo_target->operands[1] = val_hi;
      return lo_target;
    }
    lo_match = false;
    if (data_target->operands[0] == val_lo) {
      lo_match = true;
      lo_target = data_target;
    }
    data_target = data_target->next;
  }
  return NULL;
}

/* Search the existing constants in the literal pool for an exact method match */
LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
  while (data_target) {
    if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
        UnwrapPointer(data_target->operands[1]) == method.dex_file) {
      return data_target;
    }
    data_target = data_target->next;
  }
  return nullptr;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant to the constant pool */
LIR* Mir2Lir::AddWordData(LIR** constant_list_p, int value) {
  /* Add the constant to the literal pool */
  if (constant_list_p) {
    LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
    new_value->operands[0] = value;
    new_value->next = *constant_list_p;
    *constant_list_p = new_value;
    estimated_native_code_size_ += sizeof(value);
    return new_value;
  }
  return NULL;
}

/* Add a 64-bit constant to the constant pool or mixed with code */
LIR* Mir2Lir::AddWideData(LIR** constant_list_p, int val_lo, int val_hi) {
  AddWordData(constant_list_p, val_hi);
  return AddWordData(constant_list_p, val_lo);
}

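// Append a 32-bit value to the buffer, least-significant byte first (little-endian).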
static void Push32(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

// Push 8 bytes on 64-bit target systems; 4 on 32-bit target systems.
static void PushPointer(std::vector<uint8_t>& buf, const void* pointer, bool target64) {
  uint64_t data = reinterpret_cast<uintptr_t>(pointer);
  if (target64) {
    Push32(buf, data & 0xFFFFFFFF);
    Push32(buf, (data >> 32) & 0xFFFFFFFF);
  } else {
    Push32(buf, static_cast<uint32_t>(data));
  }
}

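// Pad the buffer with zero bytes until it reaches the requested offset.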
static void AlignBuffer(std::vector<uint8_t>& buf, size_t offset) {
  while (buf.size() < offset) {
    buf.push_back(0);
  }
}

/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
  AlignBuffer(code_buffer_, data_offset_);
  LIR* data_lir = literal_list_;
  while (data_lir != NULL) {
    Push32(code_buffer_, data_lir->operands[0]);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push code and method literals, record offsets for the compiler to patch.
  data_lir = code_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddCodePatch(cu_->dex_file,
                                       cu_->class_def_idx,
                                       cu_->method_idx,
                                       cu_->invoke_type,
                                       target_method_idx,
                                       target_dex_file,
                                       static_cast<InvokeType>(data_lir->operands[2]),
                                       code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  data_lir = method_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         cu_->invoke_type,
                                         target_method_idx,
                                         target_dex_file,
                                         static_cast<InvokeType>(data_lir->operands[2]),
                                         code_buffer_.size());
    const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push class literals.
  data_lir = class_literal_list_;
  while (data_lir != NULL) {
    uint32_t target_method_idx = data_lir->operands[0];
    cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                        cu_->class_def_idx,
                                        cu_->method_idx,
                                        target_method_idx,
                                        code_buffer_.size());
    const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &target_method_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
  // Push the string literals.
  data_lir = string_literal_list_;
  while (data_lir != nullptr) {
    uint32_t string_idx = data_lir->operands[0];
    cu_->compiler_driver->AddStringPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         string_idx,
                                         code_buffer_.size());
    const auto& target_string_id = cu_->dex_file->GetStringId(string_idx);
    // unique value based on target to ensure code deduplication works
    PushPointer(code_buffer_, &target_string_id, cu_->target64);
    data_lir = NEXT_LIR(data_lir);
  }
}

/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    /*
     * For Arm, our reference point is the address of the bx
     * instruction that does the launch, so we have to subtract
     * the auto pc-advance. For other targets the reference point
     * is a label, so we can use the offset as-is.
     */
    int bx_offset = INVALID_OFFSET;
    switch (cu_->instruction_set) {
      case kThumb2:
        DCHECK(tab_rec->anchor->flags.fixup != kFixupNone);
        bx_offset = tab_rec->anchor->offset + 4;
        break;
      case kX86:
      case kX86_64:
        bx_offset = 0;
        break;
      case kArm64:
      case kMips:
        bx_offset = tab_rec->anchor->offset;
        break;
      default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    if (cu_->verbose) {
      LOG(INFO) << "Switch table for offset 0x" << std::hex << bx_offset;
    }
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      const int32_t* keys = reinterpret_cast<const int32_t*>(&(tab_rec->table[2]));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] key: 0x"
                    << std::hex << keys[elems] << ", disp: 0x"
                    << std::hex << disp;
        }
        Push32(code_buffer_, keys[elems]);
        Push32(code_buffer_,
               tab_rec->targets[elems]->offset - bx_offset);
      }
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      for (int elems = 0; elems < tab_rec->table[1]; elems++) {
        int disp = tab_rec->targets[elems]->offset - bx_offset;
        if (cu_->verbose) {
          LOG(INFO) << " Case[" << elems << "] disp: 0x"
                    << std::hex << disp;
        }
        Push32(code_buffer_, tab_rec->targets[elems]->offset - bx_offset);
      }
    }
  }
}

/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    AlignBuffer(code_buffer_, tab_rec->offset);
    for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
      code_buffer_.push_back(tab_rec->table[i] & 0xFF);
      code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
    }
  }
}

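// Assign each 32-bit literal in the list a four-byte slot starting at the given code
// offset; returns the offset just past the last literal.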
static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += 4;
  }
  return offset;
}

static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
                                            unsigned int element_size) {
  // Align to natural pointer size.
  offset = RoundUp(offset, element_size);
  for (; lir != NULL; lir = lir->next) {
    lir->offset = offset;
    offset += element_size;
  }
  return offset;
}

// Make sure we have a code address for every declared catch entry
bool Mir2Lir::VerifyCatchEntries() {
  MappingTable table(&encoded_mapping_table_[0]);
  std::vector<uint32_t> dex_pcs;
  dex_pcs.reserve(table.DexToPcSize());
  for (auto it = table.DexToPcBegin(), end = table.DexToPcEnd(); it != end; ++it) {
    dex_pcs.push_back(it.DexPc());
  }
  // Sort dex_pcs, so that we can quickly check it against the ordered mir_graph_->catches_.
  std::sort(dex_pcs.begin(), dex_pcs.end());

  bool success = true;
  auto it = dex_pcs.begin(), end = dex_pcs.end();
  for (uint32_t dex_pc : mir_graph_->catches_) {
    while (it != end && *it < dex_pc) {
      LOG(INFO) << "Unexpected catch entry @ dex pc 0x" << std::hex << *it;
      ++it;
      success = false;
    }
    if (it == end || *it > dex_pc) {
      LOG(INFO) << "Missing native PC for catch entry @ 0x" << std::hex << dex_pc;
      success = false;
    } else {
      ++it;
    }
  }
  if (!success) {
    LOG(INFO) << "Bad dex2pcMapping table in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "Entries @ decode: " << mir_graph_->catches_.size() << ", Entries in table: "
              << table.DexToPcSize();
  }
  return success;
}


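// The encoded mapping table layout: a header of two ULEB128 values (total entry count,
// then pc2dex entry count), followed by the pc2dex entries and then the dex2pc entries.
// Each entry is a (ULEB128 native pc delta, SLEB128 dex pc delta) pair relative to the
// previous entry of the same kind.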
void Mir2Lir::CreateMappingTables() {
  uint32_t pc2dex_data_size = 0u;
  uint32_t pc2dex_entries = 0u;
  uint32_t pc2dex_offset = 0u;
  uint32_t pc2dex_dalvik_offset = 0u;
  uint32_t dex2pc_data_size = 0u;
  uint32_t dex2pc_entries = 0u;
  uint32_t dex2pc_offset = 0u;
  uint32_t dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      pc2dex_entries += 1;
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      pc2dex_data_size += UnsignedLeb128Size(tgt_lir->offset - pc2dex_offset);
      pc2dex_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      dex2pc_entries += 1;
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      dex2pc_data_size += UnsignedLeb128Size(tgt_lir->offset - dex2pc_offset);
      dex2pc_data_size += SignedLeb128Size(static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                           static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }

  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
  encoded_mapping_table_.resize(data_size);
  uint8_t* write_pos = &encoded_mapping_table_[0];
  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
  uint8_t* write_pos2 = write_pos + pc2dex_data_size;

  pc2dex_offset = 0u;
  pc2dex_dalvik_offset = 0u;
  dex2pc_offset = 0u;
  dex2pc_dalvik_offset = 0u;
  for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
      DCHECK(pc2dex_offset <= tgt_lir->offset);
      write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
      write_pos = EncodeSignedLeb128(write_pos, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                     static_cast<int32_t>(pc2dex_dalvik_offset));
      pc2dex_offset = tgt_lir->offset;
      pc2dex_dalvik_offset = tgt_lir->dalvik_offset;
    }
    if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
      DCHECK(dex2pc_offset <= tgt_lir->offset);
      write_pos2 = EncodeUnsignedLeb128(write_pos2, tgt_lir->offset - dex2pc_offset);
      write_pos2 = EncodeSignedLeb128(write_pos2, static_cast<int32_t>(tgt_lir->dalvik_offset) -
                                      static_cast<int32_t>(dex2pc_dalvik_offset));
      dex2pc_offset = tgt_lir->offset;
      dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
    }
  }
  DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
            hdr_data_size + pc2dex_data_size);
  DCHECK_EQ(static_cast<size_t>(write_pos2 - &encoded_mapping_table_[0]), data_size);

  if (kIsDebugBuild) {
    CHECK(VerifyCatchEntries());

    // Verify the encoded table holds the expected data.
    MappingTable table(&encoded_mapping_table_[0]);
    CHECK_EQ(table.TotalSize(), total_entries);
    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
    auto it = table.PcToDexBegin();
    auto it2 = table.DexToPcBegin();
    for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
        CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
        ++it;
      }
      if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoExportedPC)) {
        CHECK_EQ(tgt_lir->offset, it2.NativePcOffset());
        CHECK_EQ(tgt_lir->dalvik_offset, it2.DexPc());
        ++it2;
      }
    }
    CHECK(it == table.PcToDexEnd());
    CHECK(it2 == table.DexToPcEnd());
  }
}

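// Build the native GC map: for every safepoint in the pc2dex table, pair its native PC
// offset with the verifier's reference bitmap for the corresponding dex pc.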
void Mir2Lir::CreateNativeGcMap() {
  DCHECK(!encoded_mapping_table_.empty());
  MappingTable mapping_table(&encoded_mapping_table_[0]);
  uint32_t max_native_offset = 0;
  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    if (native_offset > max_native_offset) {
      max_native_offset = native_offset;
    }
  }
  MethodReference method_ref(cu_->dex_file, cu_->method_idx);
  const std::vector<uint8_t>& gc_map_raw =
      mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap();
  verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
  DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize());
  // Compute native offset to references size.
  GcMapBuilder native_gc_map_builder(&native_gc_map_,
                                     mapping_table.PcToDexSize(),
                                     max_native_offset, dex_gc_map.RegWidth());

  for (auto it = mapping_table.PcToDexBegin(), end = mapping_table.PcToDexEnd(); it != end; ++it) {
    uint32_t native_offset = it.NativePcOffset();
    uint32_t dex_pc = it.DexPc();
    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
        ": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    native_gc_map_builder.AddEntry(native_offset, references);
  }

  // Maybe not necessary, but this could help prevent errors where we access the verified method
  // after it has been deleted.
  mir_graph_->GetCurrentDexCompilationUnit()->ClearVerifiedMethod();
}

/* Determine the offset of each literal field */
int Mir2Lir::AssignLiteralOffset(CodeOffset offset) {
  offset = AssignLiteralOffsetCommon(literal_list_, offset);
  unsigned int ptr_size = GetInstructionSetPointerSize(cu_->instruction_set);
  offset = AssignLiteralPointerOffsetCommon(code_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(method_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(class_literal_list_, offset, ptr_size);
  offset = AssignLiteralPointerOffsetCommon(string_literal_list_, offset, ptr_size);
  return offset;
}

int Mir2Lir::AssignSwitchTablesOffset(CodeOffset offset) {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      offset += tab_rec->table[1] * (sizeof(int) * 2);
    } else {
      DCHECK_EQ(static_cast<int>(tab_rec->table[0]),
                static_cast<int>(Instruction::kPackedSwitchSignature));
      offset += tab_rec->table[1] * sizeof(int);
    }
  }
  return offset;
}

int Mir2Lir::AssignFillArrayDataOffset(CodeOffset offset) {
  GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
  while (true) {
    Mir2Lir::FillArrayData* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    tab_rec->offset = offset;
    offset += tab_rec->size;
    // word align
    offset = RoundUp(offset, 4);
  }
  return offset;
}

/*
 * Insert a kPseudoCaseLabel at the beginning of the Dalvik
 * offset vaddr if pretty-printing, otherwise use the standard block
 * label. The selected label will be used to fix up the case
 * branch table during the assembly phase. All resource flags
 * are set to prevent code motion. KeyVal is just there for debugging.
 */
LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
  LIR* boundary_lir = &block_label_list_[mir_graph_->FindBlock(vaddr)->id];
  LIR* res = boundary_lir;
  if (cu_->verbose) {
    // Only pay the expense if we're pretty-printing.
    LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
    new_label->dalvik_offset = vaddr;
    new_label->opcode = kPseudoCaseLabel;
    new_label->operands[0] = keyVal;
    new_label->flags.fixup = kFixupLabel;
    DCHECK(!new_label->flags.use_def_invalid);
    new_label->u.m.def_mask = &kEncodeAll;
    InsertLIRAfter(boundary_lir, new_label);
    res = new_label;
  }
  return res;
}

void Mir2Lir::MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], i + low_key);
  }
}

void Mir2Lir::MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec) {
  const uint16_t* table = tab_rec->table;
  DexOffset base_vaddr = tab_rec->vaddr;
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  for (int i = 0; i < entries; i++) {
    tab_rec->targets[i] = InsertCaseLabel(base_vaddr + targets[i], keys[i]);
  }
}

void Mir2Lir::ProcessSwitchTables() {
  GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
  while (true) {
    Mir2Lir::SwitchTable* tab_rec = iterator.Next();
    if (tab_rec == NULL) break;
    if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
      MarkPackedCaseLabels(tab_rec);
    } else if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
      MarkSparseCaseLabels(tab_rec);
    } else {
      LOG(FATAL) << "Invalid switch table";
    }
  }
}

void Mir2Lir::DumpSparseSwitchTable(const uint16_t* table) {
  /*
   * Sparse switch data format:
   *  ushort ident = 0x0200   magic value
   *  ushort size             number of entries in the table; > 0
   *  int keys[size]          keys, sorted low-to-high; 32-bit aligned
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (2+size*4) 16-bit code units.
   */
  uint16_t ident = table[0];
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  LOG(INFO) << "Sparse switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << " Key[" << keys[i] << "] -> 0x" << std::hex << targets[i];
  }
}

void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
  /*
   * Packed switch data format:
   *  ushort ident = 0x0100   magic value
   *  ushort size             number of entries in the table
   *  int first_key           first (and lowest) switch case value
   *  int targets[size]       branch targets, relative to switch opcode
   *
   * Total size is (4+size*2) 16-bit code units.
   */
  uint16_t ident = table[0];
  const int32_t* targets = reinterpret_cast<const int32_t*>(&table[4]);
  int entries = table[1];
  int low_key = s4FromSwitchData(&table[2]);
  LOG(INFO) << "Packed switch table - ident:0x" << std::hex << ident
            << ", entries: " << std::dec << entries << ", low_key: " << low_key;
  for (int i = 0; i < entries; i++) {
    LOG(INFO) << " Key[" << (i + low_key) << "] -> 0x" << std::hex
              << targets[i];
  }
}

/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
  // NOTE: only used for debug listings.
  NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}

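// Statically evaluate a conditional branch whose operands are constants; returns
// whether the branch would be taken.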
bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
  bool is_taken;
  switch (opcode) {
    case Instruction::IF_EQ: is_taken = (src1 == src2); break;
    case Instruction::IF_NE: is_taken = (src1 != src2); break;
    case Instruction::IF_LT: is_taken = (src1 < src2); break;
    case Instruction::IF_GE: is_taken = (src1 >= src2); break;
    case Instruction::IF_GT: is_taken = (src1 > src2); break;
    case Instruction::IF_LE: is_taken = (src1 <= src2); break;
    case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
    case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
    case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
    case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
    case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
    case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
      is_taken = false;
  }
  return is_taken;
}

// Convert relation of src1/src2 to src2/src1
ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondEq; break;
    case kCondNe: res = kCondNe; break;
    case kCondLt: res = kCondGt; break;
    case kCondGt: res = kCondLt; break;
    case kCondLe: res = kCondGe; break;
    case kCondGe: res = kCondLe; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
  ConditionCode res;
  switch (before) {
    case kCondEq: res = kCondNe; break;
    case kCondNe: res = kCondEq; break;
    case kCondLt: res = kCondGe; break;
    case kCondGt: res = kCondLe; break;
    case kCondLe: res = kCondGt; break;
    case kCondGe: res = kCondLt; break;
    default:
      res = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected ccode " << before;
  }
  return res;
}

// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Backend(arena),
      literal_list_(NULL),
      method_literal_list_(NULL),
      class_literal_list_(NULL),
      string_literal_list_(NULL),
      code_literal_list_(NULL),
      first_fixup_(NULL),
      cu_(cu),
      mir_graph_(mir_graph),
      switch_tables_(arena, 4, kGrowableArraySwitchTables),
      fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
      tempreg_info_(arena, 20, kGrowableArrayMisc),
      reginfo_map_(arena, RegStorage::kMaxRegs, kGrowableArrayMisc),
      pointer_storage_(arena, 128, kGrowableArrayMisc),
      data_offset_(0),
      total_size_(0),
      block_label_list_(NULL),
      promotion_map_(NULL),
      current_dalvik_offset_(0),
      estimated_native_code_size_(0),
      reg_pool_(NULL),
      live_sreg_(0),
      core_vmap_table_(mir_graph->GetArena()->Adapter()),
      fp_vmap_table_(mir_graph->GetArena()->Adapter()),
      num_core_spills_(0),
      num_fp_spills_(0),
      frame_size_(0),
      core_spill_mask_(0),
      fp_spill_mask_(0),
      first_lir_insn_(NULL),
      last_lir_insn_(NULL),
      slow_paths_(arena, 32, kGrowableArraySlowPaths),
      mem_ref_type_(ResourceMask::kHeapRef),
      mask_cache_(arena) {
  // Reserve pointer id 0 for NULL.
  size_t null_idx = WrapPointer(NULL);
  DCHECK_EQ(null_idx, 0U);
}

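// Main entry point for code generation: allocate registers, try the special-case
// codegen path, otherwise convert MIR to LIR, then assemble machine code.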
void Mir2Lir::Materialize() {
  cu_->NewTimingSplit("RegisterAllocation");
  CompilerInitializeRegAlloc();  // Needs to happen after SSA naming

  /* Allocate Registers using simple local allocation scheme */
  SimpleRegAlloc();

  /* First try the custom light codegen for special cases. */
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  bool special_worked = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
      ->GenSpecial(this, cu_->method_idx);

  /* Take normal path for converting MIR to LIR only if the special codegen did not succeed. */
  if (special_worked == false) {
    MethodMIR2LIR();
  }

  /* Method is not empty */
  if (first_lir_insn_) {
    // mark the targets of switch statement case labels
    ProcessSwitchTables();

    /* Convert LIR into machine code. */
    AssembleLIR();

    if ((cu_->enable_debug & (1 << kDebugCodegenDump)) != 0) {
      CodegenDump();
    }
  }
}

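// Package the assembled code together with the frame info, mapping table, vmap table,
// and native GC map into a CompiledMethod.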
CompiledMethod* Mir2Lir::GetCompiledMethod() {
  // Combine vmap tables - core regs, then fp regs - into vmap_table.
  Leb128EncodingVector vmap_encoder;
  if (frame_size_ > 0) {
    // Prefix the encoded data with its size.
    size_t size = core_vmap_table_.size() + 1 /* marker */ + fp_vmap_table_.size();
    vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
    vmap_encoder.PushBackUnsigned(size);
    // Core regs may have been inserted out of order - sort first.
    std::sort(core_vmap_table_.begin(), core_vmap_table_.end());
    for (size_t i = 0 ; i < core_vmap_table_.size(); ++i) {
      // Copy, stripping out the phys register sort key.
      vmap_encoder.PushBackUnsigned(
          ~(-1 << VREG_NUM_WIDTH) & (core_vmap_table_[i] + VmapTable::kEntryAdjustment));
    }
    // Push a marker to take place of lr.
    vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
    if (cu_->instruction_set == kThumb2) {
      // fp regs already sorted.
      for (uint32_t i = 0; i < fp_vmap_table_.size(); i++) {
        vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
      }
    } else {
      // For other platforms regs may have been inserted out of order - sort first.
      std::sort(fp_vmap_table_.begin(), fp_vmap_table_.end());
      for (size_t i = 0 ; i < fp_vmap_table_.size(); ++i) {
        // Copy, stripping out the phys register sort key.
        vmap_encoder.PushBackUnsigned(
            ~(-1 << VREG_NUM_WIDTH) & (fp_vmap_table_[i] + VmapTable::kEntryAdjustment));
      }
    }
  } else {
    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
    DCHECK_EQ(core_vmap_table_.size(), 0u);
    DCHECK_EQ(fp_vmap_table_.size(), 0u);
    vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
  }

  std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnCallFrameInformation());
  ArrayRef<const uint8_t> cfi_ref;
  if (cfi_info.get() != nullptr) {
    cfi_ref = ArrayRef<const uint8_t>(*cfi_info);
  }
  return CompiledMethod::SwapAllocCompiledMethod(
      cu_->compiler_driver, cu_->instruction_set,
      ArrayRef<const uint8_t>(code_buffer_),
      frame_size_, core_spill_mask_, fp_spill_mask_,
      ArrayRef<const uint8_t>(encoded_mapping_table_),
      ArrayRef<const uint8_t>(vmap_encoder.GetData()),
      ArrayRef<const uint8_t>(native_gc_map_),
      cfi_ref);
}

size_t Mir2Lir::GetMaxPossibleCompilerTemps() const {
  // Choose a reasonably small value in order to contain stack growth.
  // Backends that are smarter about spill region can return larger values.
  const size_t max_compiler_temps = 10;
  return max_compiler_temps;
}

size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
  // By default assume that the Mir2Lir will need one slot for each temporary.
  // If the backend can better determine temps that have non-overlapping ranges and
  // temps that do not need to be spilled, it can actually provide a smaller region.
  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
}

int Mir2Lir::ComputeFrameSize() {
  /* Figure out the frame size */
  uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
                  + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
                  + sizeof(uint32_t)  // Filler.
                  + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
                  + GetNumBytesForCompilerTempSpillRegion();
  /* Align and set */
  return RoundUp(size, kStackAlignment);
}

/*
 * Append an LIR instruction to the LIR list maintained by a compilation
 * unit
 */
void Mir2Lir::AppendLIR(LIR* lir) {
  if (first_lir_insn_ == NULL) {
    DCHECK(last_lir_insn_ == NULL);
    last_lir_insn_ = first_lir_insn_ = lir;
    lir->prev = lir->next = NULL;
  } else {
    last_lir_insn_->next = lir;
    lir->prev = last_lir_insn_;
    lir->next = NULL;
    last_lir_insn_ = lir;
  }
}

/*
 * Insert an LIR instruction before the current instruction, which cannot be the
 * first instruction.
 *
 * prev_lir <-> new_lir <-> current_lir
 */
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
  DCHECK(current_lir->prev != NULL);
  LIR* prev_lir = current_lir->prev;

  prev_lir->next = new_lir;
  new_lir->prev = prev_lir;
  new_lir->next = current_lir;
  current_lir->prev = new_lir;
}

/*
 * Insert an LIR instruction after the current instruction, which cannot be the
 * last instruction.
 *
 * current_lir -> new_lir -> old_next
 */
void Mir2Lir::InsertLIRAfter(LIR* current_lir, LIR* new_lir) {
  new_lir->prev = current_lir;
  new_lir->next = current_lir->next;
  current_lir->next = new_lir;
  new_lir->next->prev = new_lir;
}

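// True if at most one bit of 'x' is set; note this also returns true for x == 0.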
bool Mir2Lir::IsPowerOfTwo(uint64_t x) {
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
int32_t Mir2Lir::LowestSetBit(uint64_t x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

bool Mir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) -
              mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

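// Compare a 32-bit memory word against an immediate and branch. This default path serves
// architectures that cannot compare directly to memory by loading into temp_reg first.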
LIR* Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                int offset, int check_value, LIR* target, LIR** compare) {
  // Handle this for architectures that can't compare to memory.
  LIR* inst = Load32Disp(base_reg, offset, temp_reg);
  if (compare != nullptr) {
    *compare = inst;
  }
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
  slow_paths_.Insert(slowpath);
}

void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                              SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
  if (data_target == NULL) {
    data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
    // resolve these invokes to the same method, so we don't care which one we record here.
    data_target->operands[2] = type;
  }
  // Loads a code pointer. Code from oat file can be mapped anywhere.
  LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                SpecialTargetRegister symbolic_reg) {
  LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
  if (data_target == NULL) {
    data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
    // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
    // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
    // resolve these invokes to the same method, so we don't care which one we record here.
    data_target->operands[2] = type;
  }
  // Loads an ArtMethod pointer, which is a reference as it lives in the heap.
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
  AppendLIR(load_pc_rel);
  DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
}

void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  // Use the literal pool and a PC-relative load from a data word.
  LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
  if (data_target == nullptr) {
    data_target = AddWordData(&class_literal_list_, type_idx);
  }
  // Loads a Class pointer, which is a reference as it lives in the heap.
  LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
  AppendLIR(load_pc_rel);
}

void Mir2Lir::LoadString(uint32_t string_idx, RegStorage target_reg) {
  // Use the literal pool and a PC-relative load from a data word.
  LIR* data_target = ScanLiteralPool(string_literal_list_, string_idx, 0);
  if (data_target == nullptr) {
    data_target = AddWordData(&string_literal_list_, string_idx);
  }
  // Loads a String pointer, which is a reference as it lives in the heap.
  LIR* load_pc_rel = OpPcRelLoad(target_reg, data_target);
  AppendLIR(load_pc_rel);
}

std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
  // Default case is to do nothing.
  return nullptr;
}

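// Narrow a wide RegLocation to a 32-bit view of its low half, transferring register
// liveness to the narrow view where needed.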
RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
  if (loc.location == kLocPhysReg) {
    DCHECK(!loc.reg.Is32Bit());
    if (loc.reg.IsPair()) {
      RegisterInfo* info_lo = GetRegInfo(loc.reg.GetLow());
      RegisterInfo* info_hi = GetRegInfo(loc.reg.GetHigh());
      info_lo->SetIsWide(false);
      info_hi->SetIsWide(false);
      loc.reg = info_lo->GetReg();
    } else {
      RegisterInfo* info = GetRegInfo(loc.reg);
      RegisterInfo* info_new = info->FindMatchingView(RegisterInfo::k32SoloStorageMask);
      DCHECK(info_new != nullptr);
      if (info->IsLive() && (info->SReg() == loc.s_reg_low)) {
        info->MarkDead();
        info_new->MarkLive(loc.s_reg_low);
      }
      loc.reg = info_new->GetReg();
    }
    DCHECK(loc.reg.Valid());
  }
  loc.wide = false;
  return loc;
}

void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
}

}  // namespace art