1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator.h"
18
19 #ifdef ART_ENABLE_CODEGEN_arm
20 #include "code_generator_arm_vixl.h"
21 #endif
22
23 #ifdef ART_ENABLE_CODEGEN_arm64
24 #include "code_generator_arm64.h"
25 #endif
26
27 #ifdef ART_ENABLE_CODEGEN_x86
28 #include "code_generator_x86.h"
29 #endif
30
31 #ifdef ART_ENABLE_CODEGEN_x86_64
32 #include "code_generator_x86_64.h"
33 #endif
34
35 #include "base/bit_utils.h"
36 #include "base/bit_utils_iterator.h"
37 #include "base/casts.h"
38 #include "base/leb128.h"
39 #include "class_linker.h"
40 #include "compiled_method.h"
41 #include "dex/bytecode_utils.h"
42 #include "dex/code_item_accessors-inl.h"
43 #include "dex/verified_method.h"
44 #include "graph_visualizer.h"
45 #include "image.h"
46 #include "gc/space/image_space.h"
47 #include "intern_table.h"
48 #include "intrinsics.h"
49 #include "mirror/array-inl.h"
50 #include "mirror/object_array-inl.h"
51 #include "mirror/object_reference.h"
52 #include "mirror/reference.h"
53 #include "mirror/string.h"
54 #include "parallel_move_resolver.h"
55 #include "scoped_thread_state_change-inl.h"
56 #include "ssa_liveness_analysis.h"
57 #include "stack_map.h"
58 #include "stack_map_stream.h"
59 #include "string_builder_append.h"
60 #include "thread-current-inl.h"
61 #include "utils/assembler.h"
62
63 namespace art {
64
65 // Return whether a location is consistent with a type.
CheckType(DataType::Type type,Location location)66 static bool CheckType(DataType::Type type, Location location) {
67 if (location.IsFpuRegister()
68 || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
69 return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
70 } else if (location.IsRegister() ||
71 (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
72 return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
73 } else if (location.IsRegisterPair()) {
74 return type == DataType::Type::kInt64;
75 } else if (location.IsFpuRegisterPair()) {
76 return type == DataType::Type::kFloat64;
77 } else if (location.IsStackSlot()) {
78 return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
79 || (type == DataType::Type::kFloat32)
80 || (type == DataType::Type::kReference);
81 } else if (location.IsDoubleStackSlot()) {
82 return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
83 } else if (location.IsConstant()) {
84 if (location.GetConstant()->IsIntConstant()) {
85 return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
86 } else if (location.GetConstant()->IsNullConstant()) {
87 return type == DataType::Type::kReference;
88 } else if (location.GetConstant()->IsLongConstant()) {
89 return type == DataType::Type::kInt64;
90 } else if (location.GetConstant()->IsFloatConstant()) {
91 return type == DataType::Type::kFloat32;
92 } else {
93 return location.GetConstant()->IsDoubleConstant()
94 && (type == DataType::Type::kFloat64);
95 }
96 } else {
97 return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
98 }
99 }
100
101 // Check that a location summary is consistent with an instruction.
CheckTypeConsistency(HInstruction * instruction)102 static bool CheckTypeConsistency(HInstruction* instruction) {
103 LocationSummary* locations = instruction->GetLocations();
104 if (locations == nullptr) {
105 return true;
106 }
107
108 if (locations->Out().IsUnallocated()
109 && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
110 DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
111 << instruction->GetType()
112 << " " << locations->InAt(0);
113 } else {
114 DCHECK(CheckType(instruction->GetType(), locations->Out()))
115 << instruction->GetType()
116 << " " << locations->Out();
117 }
118
119 HConstInputsRef inputs = instruction->GetInputs();
120 for (size_t i = 0; i < inputs.size(); ++i) {
121 DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
122 << inputs[i]->GetType() << " " << locations->InAt(i);
123 }
124
125 HEnvironment* environment = instruction->GetEnvironment();
126 for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
127 if (environment->GetInstructionAt(i) != nullptr) {
128 DataType::Type type = environment->GetInstructionAt(i)->GetType();
129 DCHECK(CheckType(type, environment->GetLocationAt(i)))
130 << type << " " << environment->GetLocationAt(i);
131 } else {
132 DCHECK(environment->GetLocationAt(i).IsInvalid())
133 << environment->GetLocationAt(i);
134 }
135 }
136 return true;
137 }
138
139 class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
140 public:
Create(ArenaStack * arena_stack,InstructionSet instruction_set)141 static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
142 InstructionSet instruction_set) {
143 ScopedArenaAllocator allocator(arena_stack);
144 void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
145 return std::unique_ptr<CodeGenerationData>(
146 ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
147 }
148
GetScopedAllocator()149 ScopedArenaAllocator* GetScopedAllocator() {
150 return &allocator_;
151 }
152
AddSlowPath(SlowPathCode * slow_path)153 void AddSlowPath(SlowPathCode* slow_path) {
154 slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
155 }
156
GetSlowPaths() const157 ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
158 return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
159 }
160
GetStackMapStream()161 StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
162
ReserveJitStringRoot(StringReference string_reference,Handle<mirror::String> string)163 void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
164 jit_string_roots_.Overwrite(string_reference,
165 reinterpret_cast64<uint64_t>(string.GetReference()));
166 }
167
GetJitStringRootIndex(StringReference string_reference) const168 uint64_t GetJitStringRootIndex(StringReference string_reference) const {
169 return jit_string_roots_.Get(string_reference);
170 }
171
GetNumberOfJitStringRoots() const172 size_t GetNumberOfJitStringRoots() const {
173 return jit_string_roots_.size();
174 }
175
ReserveJitClassRoot(TypeReference type_reference,Handle<mirror::Class> klass)176 void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
177 jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
178 }
179
GetJitClassRootIndex(TypeReference type_reference) const180 uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
181 return jit_class_roots_.Get(type_reference);
182 }
183
GetNumberOfJitClassRoots() const184 size_t GetNumberOfJitClassRoots() const {
185 return jit_class_roots_.size();
186 }
187
GetNumberOfJitRoots() const188 size_t GetNumberOfJitRoots() const {
189 return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
190 }
191
192 void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
193 REQUIRES_SHARED(Locks::mutator_lock_);
194
195 private:
CodeGenerationData(ScopedArenaAllocator && allocator,InstructionSet instruction_set)196 CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
197 : allocator_(std::move(allocator)),
198 stack_map_stream_(&allocator_, instruction_set),
199 slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
200 jit_string_roots_(StringReferenceValueComparator(),
201 allocator_.Adapter(kArenaAllocCodeGenerator)),
202 jit_class_roots_(TypeReferenceValueComparator(),
203 allocator_.Adapter(kArenaAllocCodeGenerator)) {
204 slow_paths_.reserve(kDefaultSlowPathsCapacity);
205 }
206
207 static constexpr size_t kDefaultSlowPathsCapacity = 8;
208
209 ScopedArenaAllocator allocator_;
210 StackMapStream stack_map_stream_;
211 ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
212
213 // Maps a StringReference (dex_file, string_index) to the index in the literal table.
214 // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
215 // will compute all the indices.
216 ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
217
218 // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
219 // Entries are intially added with a pointer in the handle zone, and `EmitJitRoots`
220 // will compute all the indices.
221 ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
222 };
223
EmitJitRoots(std::vector<Handle<mirror::Object>> * roots)224 void CodeGenerator::CodeGenerationData::EmitJitRoots(
225 /*out*/std::vector<Handle<mirror::Object>>* roots) {
226 DCHECK(roots->empty());
227 roots->reserve(GetNumberOfJitRoots());
228 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
229 size_t index = 0;
230 for (auto& entry : jit_string_roots_) {
231 // Update the `roots` with the string, and replace the address temporarily
232 // stored to the index in the table.
233 uint64_t address = entry.second;
234 roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
235 DCHECK(roots->back() != nullptr);
236 DCHECK(roots->back()->IsString());
237 entry.second = index;
238 // Ensure the string is strongly interned. This is a requirement on how the JIT
239 // handles strings. b/32995596
240 class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
241 ++index;
242 }
243 for (auto& entry : jit_class_roots_) {
244 // Update the `roots` with the class, and replace the address temporarily
245 // stored to the index in the table.
246 uint64_t address = entry.second;
247 roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
248 DCHECK(roots->back() != nullptr);
249 DCHECK(roots->back()->IsClass());
250 entry.second = index;
251 ++index;
252 }
253 }
254
GetScopedAllocator()255 ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
256 DCHECK(code_generation_data_ != nullptr);
257 return code_generation_data_->GetScopedAllocator();
258 }
259
GetStackMapStream()260 StackMapStream* CodeGenerator::GetStackMapStream() {
261 DCHECK(code_generation_data_ != nullptr);
262 return code_generation_data_->GetStackMapStream();
263 }
264
ReserveJitStringRoot(StringReference string_reference,Handle<mirror::String> string)265 void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
266 Handle<mirror::String> string) {
267 DCHECK(code_generation_data_ != nullptr);
268 code_generation_data_->ReserveJitStringRoot(string_reference, string);
269 }
270
GetJitStringRootIndex(StringReference string_reference)271 uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
272 DCHECK(code_generation_data_ != nullptr);
273 return code_generation_data_->GetJitStringRootIndex(string_reference);
274 }
275
ReserveJitClassRoot(TypeReference type_reference,Handle<mirror::Class> klass)276 void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
277 DCHECK(code_generation_data_ != nullptr);
278 code_generation_data_->ReserveJitClassRoot(type_reference, klass);
279 }
280
GetJitClassRootIndex(TypeReference type_reference)281 uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
282 DCHECK(code_generation_data_ != nullptr);
283 return code_generation_data_->GetJitClassRootIndex(type_reference);
284 }
285
EmitJitRootPatches(uint8_t * code ATTRIBUTE_UNUSED,const uint8_t * roots_data ATTRIBUTE_UNUSED)286 void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
287 const uint8_t* roots_data ATTRIBUTE_UNUSED) {
288 DCHECK(code_generation_data_ != nullptr);
289 DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
290 DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
291 }
292
GetArrayLengthOffset(HArrayLength * array_length)293 uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
294 return array_length->IsStringLength()
295 ? mirror::String::CountOffset().Uint32Value()
296 : mirror::Array::LengthOffset().Uint32Value();
297 }
298
GetArrayDataOffset(HArrayGet * array_get)299 uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
300 DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
301 return array_get->IsStringCharAt()
302 ? mirror::String::ValueOffset().Uint32Value()
303 : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
304 }
305
GoesToNextBlock(HBasicBlock * current,HBasicBlock * next) const306 bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
307 DCHECK_EQ((*block_order_)[current_block_index_], current);
308 return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
309 }
310
GetNextBlockToEmit() const311 HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
312 for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
313 HBasicBlock* block = (*block_order_)[i];
314 if (!block->IsSingleJump()) {
315 return block;
316 }
317 }
318 return nullptr;
319 }
320
FirstNonEmptyBlock(HBasicBlock * block) const321 HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
322 while (block->IsSingleJump()) {
323 block = block->GetSuccessors()[0];
324 }
325 return block;
326 }
327
328 class DisassemblyScope {
329 public:
DisassemblyScope(HInstruction * instruction,const CodeGenerator & codegen)330 DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
331 : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
332 if (codegen_.GetDisassemblyInformation() != nullptr) {
333 start_offset_ = codegen_.GetAssembler().CodeSize();
334 }
335 }
336
~DisassemblyScope()337 ~DisassemblyScope() {
338 // We avoid building this data when we know it will not be used.
339 if (codegen_.GetDisassemblyInformation() != nullptr) {
340 codegen_.GetDisassemblyInformation()->AddInstructionInterval(
341 instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
342 }
343 }
344
345 private:
346 const CodeGenerator& codegen_;
347 HInstruction* instruction_;
348 size_t start_offset_;
349 };
350
351
GenerateSlowPaths()352 void CodeGenerator::GenerateSlowPaths() {
353 DCHECK(code_generation_data_ != nullptr);
354 size_t code_start = 0;
355 for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
356 SlowPathCode* slow_path = slow_path_ptr.get();
357 current_slow_path_ = slow_path;
358 if (disasm_info_ != nullptr) {
359 code_start = GetAssembler()->CodeSize();
360 }
361 // Record the dex pc at start of slow path (required for java line number mapping).
362 MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
363 slow_path->EmitNativeCode(this);
364 if (disasm_info_ != nullptr) {
365 disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
366 }
367 }
368 current_slow_path_ = nullptr;
369 }
370
InitializeCodeGenerationData()371 void CodeGenerator::InitializeCodeGenerationData() {
372 DCHECK(code_generation_data_ == nullptr);
373 code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
374 }
375
Compile(CodeAllocator * allocator)376 void CodeGenerator::Compile(CodeAllocator* allocator) {
377 InitializeCodeGenerationData();
378
379 // The register allocator already called `InitializeCodeGeneration`,
380 // where the frame size has been computed.
381 DCHECK(block_order_ != nullptr);
382 Initialize();
383
384 HGraphVisitor* instruction_visitor = GetInstructionVisitor();
385 DCHECK_EQ(current_block_index_, 0u);
386
387 GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
388 core_spill_mask_,
389 fpu_spill_mask_,
390 GetGraph()->GetNumberOfVRegs(),
391 GetGraph()->IsCompilingBaseline());
392
393 size_t frame_start = GetAssembler()->CodeSize();
394 GenerateFrameEntry();
395 DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
396 if (disasm_info_ != nullptr) {
397 disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
398 }
399
400 for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
401 HBasicBlock* block = (*block_order_)[current_block_index_];
402 // Don't generate code for an empty block. Its predecessors will branch to its successor
403 // directly. Also, the label of that block will not be emitted, so this helps catch
404 // errors where we reference that label.
405 if (block->IsSingleJump()) continue;
406 Bind(block);
407 // This ensures that we have correct native line mapping for all native instructions.
408 // It is necessary to make stepping over a statement work. Otherwise, any initial
409 // instructions (e.g. moves) would be assumed to be the start of next statement.
410 MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
411 for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
412 HInstruction* current = it.Current();
413 if (current->HasEnvironment()) {
414 // Create stackmap for HNativeDebugInfo or any instruction which calls native code.
415 // Note that we need correct mapping for the native PC of the call instruction,
416 // so the runtime's stackmap is not sufficient since it is at PC after the call.
417 MaybeRecordNativeDebugInfo(current, block->GetDexPc());
418 }
419 DisassemblyScope disassembly_scope(current, *this);
420 DCHECK(CheckTypeConsistency(current));
421 current->Accept(instruction_visitor);
422 }
423 }
424
425 GenerateSlowPaths();
426
427 // Emit catch stack maps at the end of the stack map stream as expected by the
428 // runtime exception handler.
429 if (graph_->HasTryCatch()) {
430 RecordCatchBlockInfo();
431 }
432
433 // Finalize instructions in assember;
434 Finalize(allocator);
435
436 GetStackMapStream()->EndMethod();
437 }
438
Finalize(CodeAllocator * allocator)439 void CodeGenerator::Finalize(CodeAllocator* allocator) {
440 size_t code_size = GetAssembler()->CodeSize();
441 uint8_t* buffer = allocator->Allocate(code_size);
442
443 MemoryRegion code(buffer, code_size);
444 GetAssembler()->FinalizeInstructions(code);
445 }
446
EmitLinkerPatches(ArenaVector<linker::LinkerPatch> * linker_patches ATTRIBUTE_UNUSED)447 void CodeGenerator::EmitLinkerPatches(
448 ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
449 // No linker patches by default.
450 }
451
NeedsThunkCode(const linker::LinkerPatch & patch ATTRIBUTE_UNUSED) const452 bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
453 // Code generators that create patches requiring thunk compilation should override this function.
454 return false;
455 }
456
EmitThunkCode(const linker::LinkerPatch & patch ATTRIBUTE_UNUSED,ArenaVector<uint8_t> * code ATTRIBUTE_UNUSED,std::string * debug_name ATTRIBUTE_UNUSED)457 void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
458 /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
459 /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
460 // Code generators that create patches requiring thunk compilation should override this function.
461 LOG(FATAL) << "Unexpected call to EmitThunkCode().";
462 }
463
InitializeCodeGeneration(size_t number_of_spill_slots,size_t maximum_safepoint_spill_size,size_t number_of_out_slots,const ArenaVector<HBasicBlock * > & block_order)464 void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
465 size_t maximum_safepoint_spill_size,
466 size_t number_of_out_slots,
467 const ArenaVector<HBasicBlock*>& block_order) {
468 block_order_ = &block_order;
469 DCHECK(!block_order.empty());
470 DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
471 ComputeSpillMask();
472 first_register_slot_in_slow_path_ = RoundUp(
473 (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());
474
475 if (number_of_spill_slots == 0
476 && !HasAllocatedCalleeSaveRegisters()
477 && IsLeafMethod()
478 && !RequiresCurrentMethod()) {
479 DCHECK_EQ(maximum_safepoint_spill_size, 0u);
480 SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
481 } else {
482 SetFrameSize(RoundUp(
483 first_register_slot_in_slow_path_
484 + maximum_safepoint_spill_size
485 + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
486 + FrameEntrySpillSize(),
487 kStackAlignment));
488 }
489 }
490
CreateCommonInvokeLocationSummary(HInvoke * invoke,InvokeDexCallingConventionVisitor * visitor)491 void CodeGenerator::CreateCommonInvokeLocationSummary(
492 HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
493 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
494 LocationSummary* locations = new (allocator) LocationSummary(invoke,
495 LocationSummary::kCallOnMainOnly);
496
497 for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
498 HInstruction* input = invoke->InputAt(i);
499 locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
500 }
501
502 locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
503
504 if (invoke->IsInvokeStaticOrDirect()) {
505 HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
506 switch (call->GetMethodLoadKind()) {
507 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
508 locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
509 break;
510 case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
511 locations->AddTemp(visitor->GetMethodLocation());
512 locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
513 break;
514 default:
515 locations->AddTemp(visitor->GetMethodLocation());
516 break;
517 }
518 } else if (!invoke->IsInvokePolymorphic()) {
519 locations->AddTemp(visitor->GetMethodLocation());
520 }
521 }
522
GenerateInvokeStaticOrDirectRuntimeCall(HInvokeStaticOrDirect * invoke,Location temp,SlowPathCode * slow_path)523 void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
524 HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
525 MoveConstant(temp, invoke->GetDexMethodIndex());
526
527 // The access check is unnecessary but we do not want to introduce
528 // extra entrypoints for the codegens that do not support some
529 // invoke type and fall back to the runtime call.
530
531 // Initialize to anything to silent compiler warnings.
532 QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
533 switch (invoke->GetInvokeType()) {
534 case kStatic:
535 entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
536 break;
537 case kDirect:
538 entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
539 break;
540 case kSuper:
541 entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
542 break;
543 case kVirtual:
544 case kInterface:
545 case kPolymorphic:
546 case kCustom:
547 LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
548 UNREACHABLE();
549 }
550
551 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
552 }
GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved * invoke)553 void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
554 MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());
555
556 // Initialize to anything to silent compiler warnings.
557 QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
558 switch (invoke->GetInvokeType()) {
559 case kStatic:
560 entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
561 break;
562 case kDirect:
563 entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
564 break;
565 case kVirtual:
566 entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
567 break;
568 case kSuper:
569 entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
570 break;
571 case kInterface:
572 entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
573 break;
574 case kPolymorphic:
575 case kCustom:
576 LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
577 UNREACHABLE();
578 }
579 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
580 }
581
GenerateInvokePolymorphicCall(HInvokePolymorphic * invoke)582 void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
583 // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
584 // method index) since it requires multiple info from the instruction (registers A, B, H). Not
585 // using the reservation has no effect on the registers used in the runtime call.
586 QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
587 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
588 }
589
GenerateInvokeCustomCall(HInvokeCustom * invoke)590 void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
591 MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
592 QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
593 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
594 }
595
CreateStringBuilderAppendLocations(HStringBuilderAppend * instruction,Location out)596 void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
597 Location out) {
598 ArenaAllocator* allocator = GetGraph()->GetAllocator();
599 LocationSummary* locations =
600 new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
601 locations->SetOut(out);
602 instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
603 Location::ConstantLocation(instruction->GetFormat()));
604
605 uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
606 uint32_t f = format;
607 PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
608 size_t stack_offset = static_cast<size_t>(pointer_size); // Start after the ArtMethod*.
609 for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
610 StringBuilderAppend::Argument arg_type =
611 static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
612 switch (arg_type) {
613 case StringBuilderAppend::Argument::kStringBuilder:
614 case StringBuilderAppend::Argument::kString:
615 case StringBuilderAppend::Argument::kCharArray:
616 static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
617 FALLTHROUGH_INTENDED;
618 case StringBuilderAppend::Argument::kBoolean:
619 case StringBuilderAppend::Argument::kChar:
620 case StringBuilderAppend::Argument::kInt:
621 case StringBuilderAppend::Argument::kFloat:
622 locations->SetInAt(i, Location::StackSlot(stack_offset));
623 break;
624 case StringBuilderAppend::Argument::kLong:
625 case StringBuilderAppend::Argument::kDouble:
626 stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
627 locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
628 // Skip the low word, let the common code skip the high word.
629 stack_offset += sizeof(uint32_t);
630 break;
631 default:
632 LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
633 << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
634 UNREACHABLE();
635 }
636 f >>= StringBuilderAppend::kBitsPerArg;
637 stack_offset += sizeof(uint32_t);
638 }
639 DCHECK_EQ(f, 0u);
640
641 size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
642 DCHECK_ALIGNED(param_size, kVRegSize);
643 size_t num_vregs = param_size / kVRegSize;
644 graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
645 }
646
CreateUnresolvedFieldLocationSummary(HInstruction * field_access,DataType::Type field_type,const FieldAccessCallingConvention & calling_convention)647 void CodeGenerator::CreateUnresolvedFieldLocationSummary(
648 HInstruction* field_access,
649 DataType::Type field_type,
650 const FieldAccessCallingConvention& calling_convention) {
651 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
652 || field_access->IsUnresolvedInstanceFieldSet();
653 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
654 || field_access->IsUnresolvedStaticFieldGet();
655
656 ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
657 LocationSummary* locations =
658 new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
659
660 locations->AddTemp(calling_convention.GetFieldIndexLocation());
661
662 if (is_instance) {
663 // Add the `this` object for instance field accesses.
664 locations->SetInAt(0, calling_convention.GetObjectLocation());
665 }
666
667 // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
668 // regardless of the the type. Because of that we forced to special case
669 // the access to floating point values.
670 if (is_get) {
671 if (DataType::IsFloatingPointType(field_type)) {
672 // The return value will be stored in regular registers while register
673 // allocator expects it in a floating point register.
674 // Note We don't need to request additional temps because the return
675 // register(s) are already blocked due the call and they may overlap with
676 // the input or field index.
677 // The transfer between the two will be done at codegen level.
678 locations->SetOut(calling_convention.GetFpuLocation(field_type));
679 } else {
680 locations->SetOut(calling_convention.GetReturnLocation(field_type));
681 }
682 } else {
683 size_t set_index = is_instance ? 1 : 0;
684 if (DataType::IsFloatingPointType(field_type)) {
685 // The set value comes from a float location while the calling convention
686 // expects it in a regular register location. Allocate a temp for it and
687 // make the transfer at codegen.
688 AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
689 locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
690 } else {
691 locations->SetInAt(set_index,
692 calling_convention.GetSetValueLocation(field_type, is_instance));
693 }
694 }
695 }
696
GenerateUnresolvedFieldAccess(HInstruction * field_access,DataType::Type field_type,uint32_t field_index,uint32_t dex_pc,const FieldAccessCallingConvention & calling_convention)697 void CodeGenerator::GenerateUnresolvedFieldAccess(
698 HInstruction* field_access,
699 DataType::Type field_type,
700 uint32_t field_index,
701 uint32_t dex_pc,
702 const FieldAccessCallingConvention& calling_convention) {
703 LocationSummary* locations = field_access->GetLocations();
704
705 MoveConstant(locations->GetTemp(0), field_index);
706
707 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
708 || field_access->IsUnresolvedInstanceFieldSet();
709 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
710 || field_access->IsUnresolvedStaticFieldGet();
711
712 if (!is_get && DataType::IsFloatingPointType(field_type)) {
713 // Copy the float value to be set into the calling convention register.
714 // Note that using directly the temp location is problematic as we don't
715 // support temp register pairs. To avoid boilerplate conversion code, use
716 // the location from the calling convention.
717 MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
718 locations->InAt(is_instance ? 1 : 0),
719 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
720 : DataType::Type::kInt32));
721 }
722
723 QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
724 switch (field_type) {
725 case DataType::Type::kBool:
726 entrypoint = is_instance
727 ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
728 : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
729 break;
730 case DataType::Type::kInt8:
731 entrypoint = is_instance
732 ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
733 : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
734 break;
735 case DataType::Type::kInt16:
736 entrypoint = is_instance
737 ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
738 : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
739 break;
740 case DataType::Type::kUint16:
741 entrypoint = is_instance
742 ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
743 : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
744 break;
745 case DataType::Type::kInt32:
746 case DataType::Type::kFloat32:
747 entrypoint = is_instance
748 ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
749 : (is_get ? kQuickGet32Static : kQuickSet32Static);
750 break;
751 case DataType::Type::kReference:
752 entrypoint = is_instance
753 ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
754 : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
755 break;
756 case DataType::Type::kInt64:
757 case DataType::Type::kFloat64:
758 entrypoint = is_instance
759 ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
760 : (is_get ? kQuickGet64Static : kQuickSet64Static);
761 break;
762 default:
763 LOG(FATAL) << "Invalid type " << field_type;
764 }
765 InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
766
767 if (is_get && DataType::IsFloatingPointType(field_type)) {
768 MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
769 }
770 }
771
CreateLoadClassRuntimeCallLocationSummary(HLoadClass * cls,Location runtime_type_index_location,Location runtime_return_location)772 void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
773 Location runtime_type_index_location,
774 Location runtime_return_location) {
775 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
776 DCHECK_EQ(cls->InputCount(), 1u);
777 LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
778 cls, LocationSummary::kCallOnMainOnly);
779 locations->SetInAt(0, Location::NoLocation());
780 locations->AddTemp(runtime_type_index_location);
781 locations->SetOut(runtime_return_location);
782 }
783
GenerateLoadClassRuntimeCall(HLoadClass * cls)784 void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
785 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
786 DCHECK(!cls->MustGenerateClinitCheck());
787 LocationSummary* locations = cls->GetLocations();
788 MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
789 if (cls->NeedsAccessCheck()) {
790 CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
791 InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
792 } else {
793 CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
794 InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
795 }
796 }
797
CreateLoadMethodHandleRuntimeCallLocationSummary(HLoadMethodHandle * method_handle,Location runtime_proto_index_location,Location runtime_return_location)798 void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
799 HLoadMethodHandle* method_handle,
800 Location runtime_proto_index_location,
801 Location runtime_return_location) {
802 DCHECK_EQ(method_handle->InputCount(), 1u);
803 LocationSummary* locations =
804 new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
805 method_handle, LocationSummary::kCallOnMainOnly);
806 locations->SetInAt(0, Location::NoLocation());
807 locations->AddTemp(runtime_proto_index_location);
808 locations->SetOut(runtime_return_location);
809 }
810
GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle * method_handle)811 void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
812 LocationSummary* locations = method_handle->GetLocations();
813 MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
814 CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
815 InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
816 }
817
CreateLoadMethodTypeRuntimeCallLocationSummary(HLoadMethodType * method_type,Location runtime_proto_index_location,Location runtime_return_location)818 void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
819 HLoadMethodType* method_type,
820 Location runtime_proto_index_location,
821 Location runtime_return_location) {
822 DCHECK_EQ(method_type->InputCount(), 1u);
823 LocationSummary* locations =
824 new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
825 method_type, LocationSummary::kCallOnMainOnly);
826 locations->SetInAt(0, Location::NoLocation());
827 locations->AddTemp(runtime_proto_index_location);
828 locations->SetOut(runtime_return_location);
829 }
830
GenerateLoadMethodTypeRuntimeCall(HLoadMethodType * method_type)831 void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
832 LocationSummary* locations = method_type->GetLocations();
833 MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
834 CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
835 InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
836 }
837
GetBootImageOffsetImpl(const void * object,ImageHeader::ImageSections section)838 static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
839 Runtime* runtime = Runtime::Current();
840 DCHECK(runtime->IsAotCompiler());
841 const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
842 runtime->GetHeap()->GetBootImageSpaces();
843 // Check that the `object` is in the expected section of one of the boot image files.
844 DCHECK(std::any_of(boot_image_spaces.begin(),
845 boot_image_spaces.end(),
846 [object, section](gc::space::ImageSpace* space) {
847 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
848 uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
849 return space->GetImageHeader().GetImageSection(section).Contains(offset);
850 }));
851 uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
852 uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
853 return dchecked_integral_cast<uint32_t>(offset);
854 }
855
856 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
GetBootImageOffset(HLoadClass * load_class)857 uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
858 DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
859 ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
860 DCHECK(klass != nullptr);
861 return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
862 }
863
864 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
GetBootImageOffset(HLoadString * load_string)865 uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
866 DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
867 ObjPtr<mirror::String> string = load_string->GetString().Get();
868 DCHECK(string != nullptr);
869 return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
870 }
871
GetBootImageOffset(HInvokeStaticOrDirect * invoke)872 uint32_t CodeGenerator::GetBootImageOffset(HInvokeStaticOrDirect* invoke) {
873 DCHECK_EQ(invoke->GetMethodLoadKind(), HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo);
874 ArtMethod* method = invoke->GetResolvedMethod();
875 DCHECK(method != nullptr);
876 return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
877 }
878
BlockIfInRegister(Location location,bool is_out) const879 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
880 // The DCHECKS below check that a register is not specified twice in
881 // the summary. The out location can overlap with an input, so we need
882 // to special case it.
883 if (location.IsRegister()) {
884 DCHECK(is_out || !blocked_core_registers_[location.reg()]);
885 blocked_core_registers_[location.reg()] = true;
886 } else if (location.IsFpuRegister()) {
887 DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
888 blocked_fpu_registers_[location.reg()] = true;
889 } else if (location.IsFpuRegisterPair()) {
890 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
891 blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
892 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
893 blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
894 } else if (location.IsRegisterPair()) {
895 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
896 blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
897 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
898 blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
899 }
900 }
901
AllocateLocations(HInstruction * instruction)902 void CodeGenerator::AllocateLocations(HInstruction* instruction) {
903 for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
904 env->AllocateLocations();
905 }
906 instruction->Accept(GetLocationBuilder());
907 DCHECK(CheckTypeConsistency(instruction));
908 LocationSummary* locations = instruction->GetLocations();
909 if (!instruction->IsSuspendCheckEntry()) {
910 if (locations != nullptr) {
911 if (locations->CanCall()) {
912 MarkNotLeaf();
913 } else if (locations->Intrinsified() &&
914 instruction->IsInvokeStaticOrDirect() &&
915 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
916 // A static method call that has been fully intrinsified, and cannot call on the slow
917 // path or refer to the current method directly, no longer needs current method.
918 return;
919 }
920 }
921 if (instruction->NeedsCurrentMethod()) {
922 SetRequiresCurrentMethod();
923 }
924 }
925 }
926
Create(HGraph * graph,const CompilerOptions & compiler_options,OptimizingCompilerStats * stats)927 std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
928 const CompilerOptions& compiler_options,
929 OptimizingCompilerStats* stats) {
930 ArenaAllocator* allocator = graph->GetAllocator();
931 switch (compiler_options.GetInstructionSet()) {
932 #ifdef ART_ENABLE_CODEGEN_arm
933 case InstructionSet::kArm:
934 case InstructionSet::kThumb2: {
935 return std::unique_ptr<CodeGenerator>(
936 new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
937 }
938 #endif
939 #ifdef ART_ENABLE_CODEGEN_arm64
940 case InstructionSet::kArm64: {
941 return std::unique_ptr<CodeGenerator>(
942 new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
943 }
944 #endif
945 #ifdef ART_ENABLE_CODEGEN_x86
946 case InstructionSet::kX86: {
947 return std::unique_ptr<CodeGenerator>(
948 new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
949 }
950 #endif
951 #ifdef ART_ENABLE_CODEGEN_x86_64
952 case InstructionSet::kX86_64: {
953 return std::unique_ptr<CodeGenerator>(
954 new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
955 }
956 #endif
957 default:
958 return nullptr;
959 }
960 }
961
CodeGenerator(HGraph * graph,size_t number_of_core_registers,size_t number_of_fpu_registers,size_t number_of_register_pairs,uint32_t core_callee_save_mask,uint32_t fpu_callee_save_mask,const CompilerOptions & compiler_options,OptimizingCompilerStats * stats)962 CodeGenerator::CodeGenerator(HGraph* graph,
963 size_t number_of_core_registers,
964 size_t number_of_fpu_registers,
965 size_t number_of_register_pairs,
966 uint32_t core_callee_save_mask,
967 uint32_t fpu_callee_save_mask,
968 const CompilerOptions& compiler_options,
969 OptimizingCompilerStats* stats)
970 : frame_size_(0),
971 core_spill_mask_(0),
972 fpu_spill_mask_(0),
973 first_register_slot_in_slow_path_(0),
974 allocated_registers_(RegisterSet::Empty()),
975 blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
976 kArenaAllocCodeGenerator)),
977 blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
978 kArenaAllocCodeGenerator)),
979 number_of_core_registers_(number_of_core_registers),
980 number_of_fpu_registers_(number_of_fpu_registers),
981 number_of_register_pairs_(number_of_register_pairs),
982 core_callee_save_mask_(core_callee_save_mask),
983 fpu_callee_save_mask_(fpu_callee_save_mask),
984 block_order_(nullptr),
985 disasm_info_(nullptr),
986 stats_(stats),
987 graph_(graph),
988 compiler_options_(compiler_options),
989 current_slow_path_(nullptr),
990 current_block_index_(0),
991 is_leaf_(true),
992 requires_current_method_(false),
993 code_generation_data_() {
994 if (GetGraph()->IsCompilingOsr()) {
995 // Make OSR methods have all registers spilled, this simplifies the logic of
996 // jumping to the compiled code directly.
997 for (size_t i = 0; i < number_of_core_registers_; ++i) {
998 if (IsCoreCalleeSaveRegister(i)) {
999 AddAllocatedRegister(Location::RegisterLocation(i));
1000 }
1001 }
1002 for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
1003 if (IsFloatingPointCalleeSaveRegister(i)) {
1004 AddAllocatedRegister(Location::FpuRegisterLocation(i));
1005 }
1006 }
1007 }
1008 }
1009
~CodeGenerator()1010 CodeGenerator::~CodeGenerator() {}
1011
GetNumberOfJitRoots() const1012 size_t CodeGenerator::GetNumberOfJitRoots() const {
1013 DCHECK(code_generation_data_ != nullptr);
1014 return code_generation_data_->GetNumberOfJitRoots();
1015 }
1016
CheckCovers(uint32_t dex_pc,const HGraph & graph,const CodeInfo & code_info,const ArenaVector<HSuspendCheck * > & loop_headers,ArenaVector<size_t> * covered)1017 static void CheckCovers(uint32_t dex_pc,
1018 const HGraph& graph,
1019 const CodeInfo& code_info,
1020 const ArenaVector<HSuspendCheck*>& loop_headers,
1021 ArenaVector<size_t>* covered) {
1022 for (size_t i = 0; i < loop_headers.size(); ++i) {
1023 if (loop_headers[i]->GetDexPc() == dex_pc) {
1024 if (graph.IsCompilingOsr()) {
1025 DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
1026 }
1027 ++(*covered)[i];
1028 }
1029 }
1030 }
1031
1032 // Debug helper to ensure loop entries in compiled code are matched by
1033 // dex branch instructions.
CheckLoopEntriesCanBeUsedForOsr(const HGraph & graph,const CodeInfo & code_info,const dex::CodeItem & code_item)1034 static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
1035 const CodeInfo& code_info,
1036 const dex::CodeItem& code_item) {
1037 if (graph.HasTryCatch()) {
1038 // One can write loops through try/catch, which we do not support for OSR anyway.
1039 return;
1040 }
1041 ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
1042 for (HBasicBlock* block : graph.GetReversePostOrder()) {
1043 if (block->IsLoopHeader()) {
1044 HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
1045 if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
1046 loop_headers.push_back(suspend_check);
1047 }
1048 }
1049 }
1050 ArenaVector<size_t> covered(
1051 loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
1052 for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
1053 &code_item)) {
1054 const uint32_t dex_pc = pair.DexPc();
1055 const Instruction& instruction = pair.Inst();
1056 if (instruction.IsBranch()) {
1057 uint32_t target = dex_pc + instruction.GetTargetOffset();
1058 CheckCovers(target, graph, code_info, loop_headers, &covered);
1059 } else if (instruction.IsSwitch()) {
1060 DexSwitchTable table(instruction, dex_pc);
1061 uint16_t num_entries = table.GetNumEntries();
1062 size_t offset = table.GetFirstValueIndex();
1063
1064 // Use a larger loop counter type to avoid overflow issues.
1065 for (size_t i = 0; i < num_entries; ++i) {
1066 // The target of the case.
1067 uint32_t target = dex_pc + table.GetEntryAt(i + offset);
1068 CheckCovers(target, graph, code_info, loop_headers, &covered);
1069 }
1070 }
1071 }
1072
1073 for (size_t i = 0; i < covered.size(); ++i) {
1074 DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
1075 }
1076 }
1077
BuildStackMaps(const dex::CodeItem * code_item)1078 ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
1079 ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
1080 if (kIsDebugBuild && code_item != nullptr) {
1081 CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
1082 }
1083 return stack_map;
1084 }
1085
1086 // Returns whether stackmap dex register info is needed for the instruction.
1087 //
1088 // The following cases mandate having a dex register map:
1089 // * Deoptimization
1090 // when we need to obtain the values to restore actual vregisters for interpreter.
1091 // * Debuggability
1092 // when we want to observe the values / asynchronously deoptimize.
1093 // * Monitor operations
1094 // to allow dumping in a stack trace locked dex registers for non-debuggable code.
1095 // * On-stack-replacement (OSR)
1096 // when entering compiled for OSR code from the interpreter we need to initialize the compiled
1097 // code values with the values from the vregisters.
1098 // * Method local catch blocks
1099 // a catch block must see the environment of the instruction from the same method that can
1100 // throw to this block.
NeedsVregInfo(HInstruction * instruction,bool osr)1101 static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
1102 HGraph* graph = instruction->GetBlock()->GetGraph();
1103 return instruction->IsDeoptimize() ||
1104 graph->IsDebuggable() ||
1105 graph->HasMonitorOperations() ||
1106 osr ||
1107 instruction->CanThrowIntoCatchBlock();
1108 }
1109
RecordPcInfo(HInstruction * instruction,uint32_t dex_pc,SlowPathCode * slow_path,bool native_debug_info)1110 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
1111 uint32_t dex_pc,
1112 SlowPathCode* slow_path,
1113 bool native_debug_info) {
1114 RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
1115 }
1116
RecordPcInfo(HInstruction * instruction,uint32_t dex_pc,uint32_t native_pc,SlowPathCode * slow_path,bool native_debug_info)1117 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
1118 uint32_t dex_pc,
1119 uint32_t native_pc,
1120 SlowPathCode* slow_path,
1121 bool native_debug_info) {
1122 if (instruction != nullptr) {
1123 // The code generated for some type conversions
1124 // may call the runtime, thus normally requiring a subsequent
1125 // call to this method. However, the method verifier does not
1126 // produce PC information for certain instructions, which are
1127 // considered "atomic" (they cannot join a GC).
1128 // Therefore we do not currently record PC information for such
1129 // instructions. As this may change later, we added this special
1130 // case so that code generators may nevertheless call
1131 // CodeGenerator::RecordPcInfo without triggering an error in
1132 // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
1133 // thereafter.
1134 if (instruction->IsTypeConversion()) {
1135 return;
1136 }
1137 if (instruction->IsRem()) {
1138 DataType::Type type = instruction->AsRem()->GetResultType();
1139 if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
1140 return;
1141 }
1142 }
1143 }
1144
1145 StackMapStream* stack_map_stream = GetStackMapStream();
1146 if (instruction == nullptr) {
1147 // For stack overflow checks and native-debug-info entries without dex register
1148 // mapping (i.e. start of basic block or start of slow path).
1149 stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
1150 stack_map_stream->EndStackMapEntry();
1151 return;
1152 }
1153
1154 LocationSummary* locations = instruction->GetLocations();
1155 uint32_t register_mask = locations->GetRegisterMask();
1156 DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
1157 if (locations->OnlyCallsOnSlowPath()) {
1158 // In case of slow path, we currently set the location of caller-save registers
1159 // to register (instead of their stack location when pushed before the slow-path
1160 // call). Therefore register_mask contains both callee-save and caller-save
1161 // registers that hold objects. We must remove the spilled caller-save from the
1162 // mask, since they will be overwritten by the callee.
1163 uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
1164 register_mask &= ~spills;
1165 } else {
1166 // The register mask must be a subset of callee-save registers.
1167 DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
1168 }
1169
1170 uint32_t outer_dex_pc = dex_pc;
1171 uint32_t outer_environment_size = 0u;
1172 uint32_t inlining_depth = 0;
1173 HEnvironment* const environment = instruction->GetEnvironment();
1174 if (environment != nullptr) {
1175 HEnvironment* outer_environment = environment;
1176 while (outer_environment->GetParent() != nullptr) {
1177 outer_environment = outer_environment->GetParent();
1178 ++inlining_depth;
1179 }
1180 outer_dex_pc = outer_environment->GetDexPc();
1181 outer_environment_size = outer_environment->Size();
1182 }
1183
1184 HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
1185 bool osr =
1186 instruction->IsSuspendCheck() &&
1187 (info != nullptr) &&
1188 graph_->IsCompilingOsr() &&
1189 (inlining_depth == 0);
1190 StackMap::Kind kind = native_debug_info
1191 ? StackMap::Kind::Debug
1192 : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
1193 bool needs_vreg_info = NeedsVregInfo(instruction, osr);
1194 stack_map_stream->BeginStackMapEntry(outer_dex_pc,
1195 native_pc,
1196 register_mask,
1197 locations->GetStackMask(),
1198 kind,
1199 needs_vreg_info);
1200
1201 EmitEnvironment(environment, slow_path, needs_vreg_info);
1202 stack_map_stream->EndStackMapEntry();
1203
1204 if (osr) {
1205 DCHECK_EQ(info->GetSuspendCheck(), instruction);
1206 DCHECK(info->IsIrreducible());
1207 DCHECK(environment != nullptr);
1208 if (kIsDebugBuild) {
1209 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
1210 HInstruction* in_environment = environment->GetInstructionAt(i);
1211 if (in_environment != nullptr) {
1212 DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
1213 Location location = environment->GetLocationAt(i);
1214 DCHECK(location.IsStackSlot() ||
1215 location.IsDoubleStackSlot() ||
1216 location.IsConstant() ||
1217 location.IsInvalid());
1218 if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
1219 DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
1220 }
1221 }
1222 }
1223 }
1224 }
1225 }
1226
HasStackMapAtCurrentPc()1227 bool CodeGenerator::HasStackMapAtCurrentPc() {
1228 uint32_t pc = GetAssembler()->CodeSize();
1229 StackMapStream* stack_map_stream = GetStackMapStream();
1230 size_t count = stack_map_stream->GetNumberOfStackMaps();
1231 if (count == 0) {
1232 return false;
1233 }
1234 return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
1235 }
1236
MaybeRecordNativeDebugInfo(HInstruction * instruction,uint32_t dex_pc,SlowPathCode * slow_path)1237 void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
1238 uint32_t dex_pc,
1239 SlowPathCode* slow_path) {
1240 if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
1241 if (HasStackMapAtCurrentPc()) {
1242 // Ensure that we do not collide with the stack map of the previous instruction.
1243 GenerateNop();
1244 }
1245 RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
1246 }
1247 }
1248
RecordCatchBlockInfo()1249 void CodeGenerator::RecordCatchBlockInfo() {
1250 StackMapStream* stack_map_stream = GetStackMapStream();
1251
1252 for (HBasicBlock* block : *block_order_) {
1253 if (!block->IsCatchBlock()) {
1254 continue;
1255 }
1256
1257 uint32_t dex_pc = block->GetDexPc();
1258 uint32_t num_vregs = graph_->GetNumberOfVRegs();
1259 uint32_t native_pc = GetAddressOf(block);
1260
1261 stack_map_stream->BeginStackMapEntry(dex_pc,
1262 native_pc,
1263 /* register_mask= */ 0,
1264 /* sp_mask= */ nullptr,
1265 StackMap::Kind::Catch);
1266
1267 HInstruction* current_phi = block->GetFirstPhi();
1268 for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
1269 while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
1270 HInstruction* next_phi = current_phi->GetNext();
1271 DCHECK(next_phi == nullptr ||
1272 current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
1273 << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
1274 current_phi = next_phi;
1275 }
1276
1277 if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
1278 stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1279 } else {
1280 Location location = current_phi->GetLocations()->Out();
1281 switch (location.GetKind()) {
1282 case Location::kStackSlot: {
1283 stack_map_stream->AddDexRegisterEntry(
1284 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1285 break;
1286 }
1287 case Location::kDoubleStackSlot: {
1288 stack_map_stream->AddDexRegisterEntry(
1289 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1290 stack_map_stream->AddDexRegisterEntry(
1291 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1292 ++vreg;
1293 DCHECK_LT(vreg, num_vregs);
1294 break;
1295 }
1296 default: {
1297 // All catch phis must be allocated to a stack slot.
1298 LOG(FATAL) << "Unexpected kind " << location.GetKind();
1299 UNREACHABLE();
1300 }
1301 }
1302 }
1303 }
1304
1305 stack_map_stream->EndStackMapEntry();
1306 }
1307 }
1308
1309 void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
1310 DCHECK(code_generation_data_ != nullptr);
1311 code_generation_data_->AddSlowPath(slow_path);
1312 }
1313
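// Translates every environment slot into a dex register entry: constants, stack slots and
// (possibly slow-path-spilled) registers are each encoded in the form the stack map expects.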
1314 void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) {
1315 StackMapStream* stack_map_stream = GetStackMapStream();
1316 // Walk over the environment, and record the location of dex registers.
1317 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
1318 HInstruction* current = environment->GetInstructionAt(i);
1319 if (current == nullptr) {
1320 stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1321 continue;
1322 }
1323
1324 using Kind = DexRegisterLocation::Kind;
1325 Location location = environment->GetLocationAt(i);
1326 switch (location.GetKind()) {
1327 case Location::kConstant: {
1328 DCHECK_EQ(current, location.GetConstant());
1329 if (current->IsLongConstant()) {
1330 int64_t value = current->AsLongConstant()->GetValue();
1331 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
1332 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
1333 ++i;
1334 DCHECK_LT(i, environment_size);
1335 } else if (current->IsDoubleConstant()) {
1336 int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
1337 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
1338 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
1339 ++i;
1340 DCHECK_LT(i, environment_size);
1341 } else if (current->IsIntConstant()) {
1342 int32_t value = current->AsIntConstant()->GetValue();
1343 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
1344 } else if (current->IsNullConstant()) {
1345 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
1346 } else {
1347 DCHECK(current->IsFloatConstant()) << current->DebugName();
1348 int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
1349 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
1350 }
1351 break;
1352 }
1353
1354 case Location::kStackSlot: {
1355 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
1356 break;
1357 }
1358
1359 case Location::kDoubleStackSlot: {
1360 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
1361 stack_map_stream->AddDexRegisterEntry(
1362 Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1363 ++i;
1364 DCHECK_LT(i, environment_size);
1365 break;
1366 }
1367
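// Core register: if the slow path has spilled it, report the spill slot so the value can be
// found on the stack; 64-bit values take a second entry for their high half.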
1368 case Location::kRegister : {
1369 int id = location.reg();
1370 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
1371 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
1372 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1373 if (current->GetType() == DataType::Type::kInt64) {
1374 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
1375 ++i;
1376 DCHECK_LT(i, environment_size);
1377 }
1378 } else {
1379 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
1380 if (current->GetType() == DataType::Type::kInt64) {
1381 stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
1382 ++i;
1383 DCHECK_LT(i, environment_size);
1384 }
1385 }
1386 break;
1387 }
1388
1389 case Location::kFpuRegister : {
1390 int id = location.reg();
1391 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
1392 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
1393 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1394 if (current->GetType() == DataType::Type::kFloat64) {
1395 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
1396 ++i;
1397 DCHECK_LT(i, environment_size);
1398 }
1399 } else {
1400 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
1401 if (current->GetType() == DataType::Type::kFloat64) {
1402 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
1403 ++i;
1404 DCHECK_LT(i, environment_size);
1405 }
1406 }
1407 break;
1408 }
1409
1410 case Location::kFpuRegisterPair : {
1411 int low = location.low();
1412 int high = location.high();
1413 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
1414 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
1415 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1416 } else {
1417 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
1418 }
1419 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
1420 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
1421 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1422 ++i;
1423 } else {
1424 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
1425 ++i;
1426 }
1427 DCHECK_LT(i, environment_size);
1428 break;
1429 }
1430
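// Register pair: the low and high halves of a 64-bit value may independently be live in their
// registers or spilled by the slow path, so each half is encoded on its own.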
1431 case Location::kRegisterPair : {
1432 int low = location.low();
1433 int high = location.high();
1434 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
1435 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
1436 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1437 } else {
1438 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
1439 }
1440 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
1441 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
1442 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1443 } else {
1444 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
1445 }
1446 ++i;
1447 DCHECK_LT(i, environment_size);
1448 break;
1449 }
1450
1451 case Location::kInvalid: {
1452 stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
1453 break;
1454 }
1455
1456 default:
1457 LOG(FATAL) << "Unexpected kind " << location.GetKind();
1458 }
1459 }
1460 }
1461
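// Emits the environment chain outermost-first; every inlined frame is wrapped in an inline info
// entry, and dex register locations are only emitted when the caller asked for vreg info.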
1462 void CodeGenerator::EmitEnvironment(HEnvironment* environment,
1463 SlowPathCode* slow_path,
1464 bool needs_vreg_info) {
1465 if (environment == nullptr) return;
1466
1467 StackMapStream* stack_map_stream = GetStackMapStream();
1468 bool emit_inline_info = environment->GetParent() != nullptr;
1469
1470 if (emit_inline_info) {
1471 // We emit the parent environment first.
1472 EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info);
1473 stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
1474 environment->GetDexPc(),
1475 needs_vreg_info ? environment->Size() : 0,
1476 &graph_->GetDexFile());
1477 }
1478
1479 if (needs_vreg_info) {
1480 // If a dex register map is not required, we just won't emit it.
1481 EmitVRegInfo(environment, slow_path);
1482 }
1483
1484 if (emit_inline_info) {
1485 stack_map_stream->EndInlineInfoEntry();
1486 }
1487 }
1488
1489 bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
1490 return null_check->IsEmittedAtUseSite();
1491 }
1492
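// If an implicit null check has been folded into this instruction, record a stack map at the
// faulting instruction so the runtime's fault handler can map the signal back to the check.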
1493 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
1494 HNullCheck* null_check = instr->GetImplicitNullCheck();
1495 if (null_check != nullptr) {
1496 RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
1497 }
1498 }
1499
1500 LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
1501 RegisterSet caller_saves) {
1502 // Note: Using kNoCall allows the method to be treated as a leaf (and eliminates the
1503 // HSuspendCheck from the entry block). However, it will still get a valid stack frame
1504 // because the HNullCheck needs an environment.
1505 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
1506 // When throwing from a try block, we may need to retrieve Dalvik registers from
1507 // physical registers, and we also need to set up the stack mask for GC. This is
1508 // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
1509 bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
1510 if (can_throw_into_catch_block) {
1511 call_kind = LocationSummary::kCallOnSlowPath;
1512 }
1513 LocationSummary* locations =
1514 new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
1515 if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
1516 locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
1517 }
1518 DCHECK(!instruction->HasUses());
1519 return locations;
1520 }
1521
1522 void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
1523 if (compiler_options_.GetImplicitNullChecks()) {
1524 MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
1525 GenerateImplicitNullCheck(instruction);
1526 } else {
1527 MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
1528 GenerateExplicitNullCheck(instruction);
1529 }
1530 }
1531
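// Removes from the suspend check's stack mask the reference bits for the stack slots written by
// the loop header's phi spill moves; the cleared bits mean GC will not treat those slots as
// references at this safepoint.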
1532 void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
1533 HParallelMove* spills) const {
1534 LocationSummary* locations = suspend_check->GetLocations();
1535 HBasicBlock* block = suspend_check->GetBlock();
1536 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
1537 DCHECK(block->IsLoopHeader());
1538 DCHECK(block->GetFirstInstruction() == spills);
1539
1540 for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
1541 Location dest = spills->MoveOperandsAt(i)->GetDestination();
1542 // All parallel moves in loop headers are spills.
1543 DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
1544 // Clear the stack bit marking a reference. Do not bother to check if the spill is
1545 // actually a reference spill; clearing bits that are already zero is harmless.
1546 locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
1547 }
1548 }
1549
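// Emits two moves as a single parallel move so the move resolver can order them correctly even
// when the source and destination locations overlap.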
1550 void CodeGenerator::EmitParallelMoves(Location from1,
1551 Location to1,
1552 DataType::Type type1,
1553 Location from2,
1554 Location to2,
1555 DataType::Type type2) {
1556 HParallelMove parallel_move(GetGraph()->GetAllocator());
1557 parallel_move.AddMove(from1, to1, type1, nullptr);
1558 parallel_move.AddMove(from2, to2, type2, nullptr);
1559 GetMoveResolver()->EmitNativeCode(&parallel_move);
1560 }
1561
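// Debug-build sanity checks for runtime calls: the LocationSummary's call kind, the CanTriggerGC
// side effect and the leaf-method flag must all be consistent with the call being generated.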
1562 void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
1563 HInstruction* instruction,
1564 SlowPathCode* slow_path) {
1565 // Ensure that the call kind indication given to the register allocator is
1566 // coherent with the runtime call generated.
1567 if (slow_path == nullptr) {
1568 DCHECK(instruction->GetLocations()->WillCall())
1569 << "instruction->DebugName()=" << instruction->DebugName();
1570 } else {
1571 DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
1572 << "instruction->DebugName()=" << instruction->DebugName()
1573 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1574 }
1575
1576 // Check that the GC side effect is set when required.
1577 // TODO: Reverse EntrypointCanTriggerGC
1578 if (EntrypointCanTriggerGC(entrypoint)) {
1579 if (slow_path == nullptr) {
1580 DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
1581 << "instruction->DebugName()=" << instruction->DebugName()
1582 << " instruction->GetSideEffects().ToString()="
1583 << instruction->GetSideEffects().ToString();
1584 } else {
1585 // The 'CanTriggerGC' side effect is used to restrict optimization of instructions which
1586 // depend on GC (e.g. IntermediateAddress), to ensure they are not live across GC points.
1587 // However, if execution never returns to the compiled code from a GC point, this restriction
1588 // is unnecessary, in particular for fatal slow paths which might trigger GC.
1589 DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
1590 instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
1591 // When (non-Baker) read barriers are enabled, some instructions
1592 // use a slow path to emit a read barrier, which does not trigger
1593 // GC.
1594 (kEmitCompilerReadBarrier &&
1595 !kUseBakerReadBarrier &&
1596 (instruction->IsInstanceFieldGet() ||
1597 instruction->IsStaticFieldGet() ||
1598 instruction->IsArrayGet() ||
1599 instruction->IsLoadClass() ||
1600 instruction->IsLoadString() ||
1601 instruction->IsInstanceOf() ||
1602 instruction->IsCheckCast() ||
1603 (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
1604 << "instruction->DebugName()=" << instruction->DebugName()
1605 << " instruction->GetSideEffects().ToString()="
1606 << instruction->GetSideEffects().ToString()
1607 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1608 }
1609 } else {
1610 // The GC side effect is not required for the instruction, but the instruction might still
1611 // have it, for example if it calls other entrypoints that require it.
1612 }
1613
1614 // Check the coherency of leaf information.
1615 DCHECK(instruction->IsSuspendCheck()
1616 || ((slow_path != nullptr) && slow_path->IsFatal())
1617 || instruction->GetLocations()->CanCall()
1618 || !IsLeafMethod())
1619 << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
1620 }
1621
1622 void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
1623 SlowPathCode* slow_path) {
1624 DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
1625 << "instruction->DebugName()=" << instruction->DebugName()
1626 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1627 // Only the Baker read barrier marking slow path used by certain
1628 // instructions is expected to invoke the runtime without recording
1629 // PC-related information.
1630 DCHECK(kUseBakerReadBarrier);
1631 DCHECK(instruction->IsInstanceFieldGet() ||
1632 instruction->IsStaticFieldGet() ||
1633 instruction->IsArrayGet() ||
1634 instruction->IsArraySet() ||
1635 instruction->IsLoadClass() ||
1636 instruction->IsLoadString() ||
1637 instruction->IsInstanceOf() ||
1638 instruction->IsCheckCast() ||
1639 (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
1640 (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
1641 << "instruction->DebugName()=" << instruction->DebugName()
1642 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1643 }
1644
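// Spills the live caller-save registers into the slow path's spill area, remembering each
// register's stack offset and marking object-holding slots in the stack mask for GC.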
1645 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1646 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1647
1648 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
1649 for (uint32_t i : LowToHighBits(core_spills)) {
1650 // If the register holds an object, update the stack mask.
1651 if (locations->RegisterContainsObject(i)) {
1652 locations->SetStackBit(stack_offset / kVRegSize);
1653 }
1654 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1655 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1656 saved_core_stack_offsets_[i] = stack_offset;
1657 stack_offset += codegen->SaveCoreRegister(stack_offset, i);
1658 }
1659
1660 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
1661 for (uint32_t i : LowToHighBits(fp_spills)) {
1662 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1663 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1664 saved_fpu_stack_offsets_[i] = stack_offset;
1665 stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
1666 }
1667 }
1668
1669 void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1670 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1671
1672 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
1673 for (uint32_t i : LowToHighBits(core_spills)) {
1674 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1675 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1676 stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
1677 }
1678
1679 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
1680 for (uint32_t i : LowToHighBits(fp_spills)) {
1681 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1682 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1683 stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
1684 }
1685 }
1686
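// Builds the intrinsic LocationSummary for System.arraycopy; when a known failure or an
// unsupported case is detected, no locations are created and the call goes to the runtime.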
1687 void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
1688 // Check to see if there are known failures that will force us to bail out to the
1689 // runtime; if so, just generate the runtime call directly.
1690 HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
1691 HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
1692
1693 // The positions must be non-negative.
1694 if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
1695 (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
1696 // We will have to fail anyway.
1697 return;
1698 }
1699
1700 // The length must be >= 0.
1701 HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
1702 if (length != nullptr) {
1703 int32_t len = length->GetValue();
1704 if (len < 0) {
1705 // Just call as normal.
1706 return;
1707 }
1708 }
1709
1710 SystemArrayCopyOptimizations optimizations(invoke);
1711
1712 if (optimizations.GetDestinationIsSource()) {
1713 if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
1714 // We only support backward copying if source and destination are the same.
1715 return;
1716 }
1717 }
1718
1719 if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
1720 // We currently don't intrinsify primitive copying.
1721 return;
1722 }
1723
1724 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
1725 LocationSummary* locations = new (allocator) LocationSummary(invoke,
1726 LocationSummary::kCallOnSlowPath,
1727 kIntrinsified);
1728 // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
1729 locations->SetInAt(0, Location::RequiresRegister());
1730 locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
1731 locations->SetInAt(2, Location::RequiresRegister());
1732 locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
1733 locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
1734
1735 locations->AddTemp(Location::RequiresRegister());
1736 locations->AddTemp(Location::RequiresRegister());
1737 locations->AddTemp(Location::RequiresRegister());
1738 }
1739
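// Used when JIT-compiling: fills 'roots' with the objects collected during code generation and
// patches the emitted code so its root accesses point into the root table at 'roots_data'.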
1740 void CodeGenerator::EmitJitRoots(uint8_t* code,
1741 const uint8_t* roots_data,
1742 /*out*/std::vector<Handle<mirror::Object>>* roots) {
1743 code_generation_data_->EmitJitRoots(roots);
1744 EmitJitRootPatches(code, roots_data);
1745 }
1746
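// Picks the allocation entrypoint that matches the array's component size (1, 2, 4 or 8 bytes).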
1747 QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
1748 switch (new_array->GetComponentSizeShift()) {
1749 case 0: return kQuickAllocArrayResolved8;
1750 case 1: return kQuickAllocArrayResolved16;
1751 case 2: return kQuickAllocArrayResolved32;
1752 case 3: return kQuickAllocArrayResolved64;
1753 }
1754 LOG(FATAL) << "Unreachable";
1755 UNREACHABLE();
1756 }
1757
1758 } // namespace art
1759