1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "code_generator.h"
18 
19 #ifdef ART_ENABLE_CODEGEN_arm
20 #include "code_generator_arm_vixl.h"
21 #endif
22 
23 #ifdef ART_ENABLE_CODEGEN_arm64
24 #include "code_generator_arm64.h"
25 #endif
26 
27 #ifdef ART_ENABLE_CODEGEN_x86
28 #include "code_generator_x86.h"
29 #endif
30 
31 #ifdef ART_ENABLE_CODEGEN_x86_64
32 #include "code_generator_x86_64.h"
33 #endif
34 
35 #ifdef ART_ENABLE_CODEGEN_mips
36 #include "code_generator_mips.h"
37 #endif
38 
39 #ifdef ART_ENABLE_CODEGEN_mips64
40 #include "code_generator_mips64.h"
41 #endif
42 
43 #include "base/bit_utils.h"
44 #include "base/bit_utils_iterator.h"
45 #include "base/casts.h"
46 #include "base/leb128.h"
47 #include "class_linker.h"
48 #include "compiled_method.h"
49 #include "dex/bytecode_utils.h"
50 #include "dex/code_item_accessors-inl.h"
51 #include "dex/verified_method.h"
52 #include "driver/compiler_driver.h"
53 #include "graph_visualizer.h"
54 #include "intern_table.h"
55 #include "intrinsics.h"
56 #include "mirror/array-inl.h"
57 #include "mirror/object_array-inl.h"
58 #include "mirror/object_reference.h"
59 #include "mirror/reference.h"
60 #include "mirror/string.h"
61 #include "parallel_move_resolver.h"
62 #include "scoped_thread_state_change-inl.h"
63 #include "ssa_liveness_analysis.h"
64 #include "stack_map_stream.h"
65 #include "thread-current-inl.h"
66 #include "utils/assembler.h"
67 
68 namespace art {
69 
70 // If true, we record the static and direct invokes in the invoke infos.
71 static constexpr bool kEnableDexLayoutOptimizations = false;
72 
73 // Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
75   if (location.IsFpuRegister()
76       || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
77     return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
78   } else if (location.IsRegister() ||
79              (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
80     return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
81   } else if (location.IsRegisterPair()) {
82     return type == DataType::Type::kInt64;
83   } else if (location.IsFpuRegisterPair()) {
84     return type == DataType::Type::kFloat64;
85   } else if (location.IsStackSlot()) {
86     return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
87            || (type == DataType::Type::kFloat32)
88            || (type == DataType::Type::kReference);
89   } else if (location.IsDoubleStackSlot()) {
90     return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
91   } else if (location.IsConstant()) {
92     if (location.GetConstant()->IsIntConstant()) {
93       return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
94     } else if (location.GetConstant()->IsNullConstant()) {
95       return type == DataType::Type::kReference;
96     } else if (location.GetConstant()->IsLongConstant()) {
97       return type == DataType::Type::kInt64;
98     } else if (location.GetConstant()->IsFloatConstant()) {
99       return type == DataType::Type::kFloat32;
100     } else {
101       return location.GetConstant()->IsDoubleConstant()
102           && (type == DataType::Type::kFloat64);
103     }
104   } else {
105     return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
106   }
107 }
108 
109 // Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
111   LocationSummary* locations = instruction->GetLocations();
112   if (locations == nullptr) {
113     return true;
114   }
115 
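  // An output constrained to be the same as the first input must be checked
  // against that input's location rather than the (unallocated) output.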
116   if (locations->Out().IsUnallocated()
117       && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
118     DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
119         << instruction->GetType()
120         << " " << locations->InAt(0);
121   } else {
122     DCHECK(CheckType(instruction->GetType(), locations->Out()))
123         << instruction->GetType()
124         << " " << locations->Out();
125   }
126 
127   HConstInputsRef inputs = instruction->GetInputs();
128   for (size_t i = 0; i < inputs.size(); ++i) {
129     DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
130       << inputs[i]->GetType() << " " << locations->InAt(i);
131   }
132 
133   HEnvironment* environment = instruction->GetEnvironment();
134   for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
135     if (environment->GetInstructionAt(i) != nullptr) {
136       DataType::Type type = environment->GetInstructionAt(i)->GetType();
137       DCHECK(CheckType(type, environment->GetLocationAt(i)))
138         << type << " " << environment->GetLocationAt(i);
139     } else {
140       DCHECK(environment->GetLocationAt(i).IsInvalid())
141         << environment->GetLocationAt(i);
142     }
143   }
144   return true;
145 }
146 
147 class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
148  public:
  static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
150                                                     InstructionSet instruction_set) {
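    // Allocate memory for the CodeGenerationData from a scoped arena allocator and
    // placement-new the object there, moving the allocator into the object so the
    // instance lives in (and owns) its own arena memory.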
151     ScopedArenaAllocator allocator(arena_stack);
152     void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
153     return std::unique_ptr<CodeGenerationData>(
154         ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
155   }
156 
  ScopedArenaAllocator* GetScopedAllocator() {
158     return &allocator_;
159   }
160 
  void AddSlowPath(SlowPathCode* slow_path) {
162     slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
163   }
164 
  ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
166     return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
167   }
168 
  StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
170 
  void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
172     jit_string_roots_.Overwrite(string_reference,
173                                 reinterpret_cast64<uint64_t>(string.GetReference()));
174   }
175 
  uint64_t GetJitStringRootIndex(StringReference string_reference) const {
177     return jit_string_roots_.Get(string_reference);
178   }
179 
  size_t GetNumberOfJitStringRoots() const {
181     return jit_string_roots_.size();
182   }
183 
  void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
185     jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
186   }
187 
  uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
189     return jit_class_roots_.Get(type_reference);
190   }
191 
  size_t GetNumberOfJitClassRoots() const {
193     return jit_class_roots_.size();
194   }
195 
  size_t GetNumberOfJitRoots() const {
197     return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
198   }
199 
200   void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
201       REQUIRES_SHARED(Locks::mutator_lock_);
202 
203  private:
  CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
205       : allocator_(std::move(allocator)),
206         stack_map_stream_(&allocator_, instruction_set),
207         slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
208         jit_string_roots_(StringReferenceValueComparator(),
209                           allocator_.Adapter(kArenaAllocCodeGenerator)),
210         jit_class_roots_(TypeReferenceValueComparator(),
211                          allocator_.Adapter(kArenaAllocCodeGenerator)) {
212     slow_paths_.reserve(kDefaultSlowPathsCapacity);
213   }
214 
215   static constexpr size_t kDefaultSlowPathsCapacity = 8;
216 
217   ScopedArenaAllocator allocator_;
218   StackMapStream stack_map_stream_;
219   ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
220 
  // Maps a StringReference (dex_file, string_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
224   ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
225 
  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
  // will compute all the indices.
229   ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
230 };
231 
void CodeGenerator::CodeGenerationData::EmitJitRoots(
233     Handle<mirror::ObjectArray<mirror::Object>> roots) {
234   DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
235   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
236   size_t index = 0;
237   for (auto& entry : jit_string_roots_) {
238     // Update the `roots` with the string, and replace the address temporarily
239     // stored to the index in the table.
240     uint64_t address = entry.second;
241     roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
242     DCHECK(roots->Get(index) != nullptr);
243     entry.second = index;
244     // Ensure the string is strongly interned. This is a requirement on how the JIT
245     // handles strings. b/32995596
246     class_linker->GetInternTable()->InternStrong(
247         reinterpret_cast<mirror::String*>(roots->Get(index)));
248     ++index;
249   }
250   for (auto& entry : jit_class_roots_) {
251     // Update the `roots` with the class, and replace the address temporarily
252     // stored to the index in the table.
253     uint64_t address = entry.second;
254     roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
255     DCHECK(roots->Get(index) != nullptr);
256     entry.second = index;
257     ++index;
258   }
259 }
260 
ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
262   DCHECK(code_generation_data_ != nullptr);
263   return code_generation_data_->GetScopedAllocator();
264 }
265 
StackMapStream* CodeGenerator::GetStackMapStream() {
267   DCHECK(code_generation_data_ != nullptr);
268   return code_generation_data_->GetStackMapStream();
269 }
270 
void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
272                                          Handle<mirror::String> string) {
273   DCHECK(code_generation_data_ != nullptr);
274   code_generation_data_->ReserveJitStringRoot(string_reference, string);
275 }
276 
uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
278   DCHECK(code_generation_data_ != nullptr);
279   return code_generation_data_->GetJitStringRootIndex(string_reference);
280 }
281 
void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
283   DCHECK(code_generation_data_ != nullptr);
284   code_generation_data_->ReserveJitClassRoot(type_reference, klass);
285 }
286 
uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
288   DCHECK(code_generation_data_ != nullptr);
289   return code_generation_data_->GetJitClassRootIndex(type_reference);
290 }
291 
void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
293                                        const uint8_t* roots_data ATTRIBUTE_UNUSED) {
294   DCHECK(code_generation_data_ != nullptr);
295   DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
296   DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
297 }
298 
uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
300   return array_length->IsStringLength()
301       ? mirror::String::CountOffset().Uint32Value()
302       : mirror::Array::LengthOffset().Uint32Value();
303 }
304 
uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
306   DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
307   return array_get->IsStringCharAt()
308       ? mirror::String::ValueOffset().Uint32Value()
309       : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
310 }
311 
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
313   DCHECK_EQ((*block_order_)[current_block_index_], current);
314   return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
315 }
316 
HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
318   for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
319     HBasicBlock* block = (*block_order_)[i];
320     if (!block->IsSingleJump()) {
321       return block;
322     }
323   }
324   return nullptr;
325 }
326 
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
328   while (block->IsSingleJump()) {
329     block = block->GetSuccessors()[0];
330   }
331   return block;
332 }
333 
334 class DisassemblyScope {
335  public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
337       : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
338     if (codegen_.GetDisassemblyInformation() != nullptr) {
339       start_offset_ = codegen_.GetAssembler().CodeSize();
340     }
341   }
342 
  ~DisassemblyScope() {
344     // We avoid building this data when we know it will not be used.
345     if (codegen_.GetDisassemblyInformation() != nullptr) {
346       codegen_.GetDisassemblyInformation()->AddInstructionInterval(
347           instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
348     }
349   }
350 
351  private:
352   const CodeGenerator& codegen_;
353   HInstruction* instruction_;
354   size_t start_offset_;
355 };
356 
357 
void CodeGenerator::GenerateSlowPaths() {
359   DCHECK(code_generation_data_ != nullptr);
360   size_t code_start = 0;
361   for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
362     SlowPathCode* slow_path = slow_path_ptr.get();
363     current_slow_path_ = slow_path;
364     if (disasm_info_ != nullptr) {
365       code_start = GetAssembler()->CodeSize();
366     }
367     // Record the dex pc at start of slow path (required for java line number mapping).
368     MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
369     slow_path->EmitNativeCode(this);
370     if (disasm_info_ != nullptr) {
371       disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
372     }
373   }
374   current_slow_path_ = nullptr;
375 }
376 
void CodeGenerator::InitializeCodeGenerationData() {
378   DCHECK(code_generation_data_ == nullptr);
379   code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
380 }
381 
void CodeGenerator::Compile(CodeAllocator* allocator) {
383   InitializeCodeGenerationData();
384 
385   // The register allocator already called `InitializeCodeGeneration`,
386   // where the frame size has been computed.
387   DCHECK(block_order_ != nullptr);
388   Initialize();
389 
390   HGraphVisitor* instruction_visitor = GetInstructionVisitor();
391   DCHECK_EQ(current_block_index_, 0u);
392 
393   size_t frame_start = GetAssembler()->CodeSize();
394   GenerateFrameEntry();
395   DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
396   if (disasm_info_ != nullptr) {
397     disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
398   }
399 
400   for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
401     HBasicBlock* block = (*block_order_)[current_block_index_];
402     // Don't generate code for an empty block. Its predecessors will branch to its successor
403     // directly. Also, the label of that block will not be emitted, so this helps catch
404     // errors where we reference that label.
405     if (block->IsSingleJump()) continue;
406     Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
410     MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
411     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
412       HInstruction* current = it.Current();
413       if (current->HasEnvironment()) {
        // Create a stack map for HNativeDebugInfo or any instruction that calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
417         MaybeRecordNativeDebugInfo(current, block->GetDexPc());
418       }
419       DisassemblyScope disassembly_scope(current, *this);
420       DCHECK(CheckTypeConsistency(current));
421       current->Accept(instruction_visitor);
422     }
423   }
424 
425   GenerateSlowPaths();
426 
427   // Emit catch stack maps at the end of the stack map stream as expected by the
428   // runtime exception handler.
429   if (graph_->HasTryCatch()) {
430     RecordCatchBlockInfo();
431   }
432 
  // Finalize instructions in the assembler.
434   Finalize(allocator);
435 }
436 
void CodeGenerator::Finalize(CodeAllocator* allocator) {
438   size_t code_size = GetAssembler()->CodeSize();
439   uint8_t* buffer = allocator->Allocate(code_size);
440 
441   MemoryRegion code(buffer, code_size);
442   GetAssembler()->FinalizeInstructions(code);
443 }
444 
void CodeGenerator::EmitLinkerPatches(
446     ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
447   // No linker patches by default.
448 }
449 
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
451                                              size_t maximum_safepoint_spill_size,
452                                              size_t number_of_out_slots,
453                                              const ArenaVector<HBasicBlock*>& block_order) {
454   block_order_ = &block_order;
455   DCHECK(!block_order.empty());
456   DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
457   ComputeSpillMask();
458   first_register_slot_in_slow_path_ = RoundUp(
459       (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());
460 
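  // A leaf method with no spill slots, no allocated callee-save registers and no
  // need for the current method can use an empty frame; the frame then only needs
  // to cover the return PC if the call instruction pushes it on this architecture.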
461   if (number_of_spill_slots == 0
462       && !HasAllocatedCalleeSaveRegisters()
463       && IsLeafMethod()
464       && !RequiresCurrentMethod()) {
465     DCHECK_EQ(maximum_safepoint_spill_size, 0u);
466     SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
467   } else {
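    // Otherwise the frame holds the outgoing argument and spill slots, the
    // safepoint spill area, the optional "should deoptimize" flag and the
    // frame entry spills, rounded up to the stack alignment.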
468     SetFrameSize(RoundUp(
469         first_register_slot_in_slow_path_
470         + maximum_safepoint_spill_size
471         + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
472         + FrameEntrySpillSize(),
473         kStackAlignment));
474   }
475 }
476 
void CodeGenerator::CreateCommonInvokeLocationSummary(
478     HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
479   ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
480   LocationSummary* locations = new (allocator) LocationSummary(invoke,
481                                                                LocationSummary::kCallOnMainOnly);
482 
483   for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
484     HInstruction* input = invoke->InputAt(i);
485     locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
486   }
487 
488   locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
489 
490   if (invoke->IsInvokeStaticOrDirect()) {
491     HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
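    // How the callee ArtMethod* is provided depends on the method load kind:
    // a recursive call reuses the current method register as an input, a runtime
    // call keeps its extra input in a register and reserves the method register
    // as a temp, and all other load kinds only need the method register as a temp.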
492     switch (call->GetMethodLoadKind()) {
493       case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
494         locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
495         break;
496       case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
497         locations->AddTemp(visitor->GetMethodLocation());
498         locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
499         break;
500       default:
501         locations->AddTemp(visitor->GetMethodLocation());
502         break;
503     }
504   } else {
505     locations->AddTemp(visitor->GetMethodLocation());
506   }
507 }
508 
void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
510     HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
511   MoveConstant(temp, invoke->GetDexMethodIndex());
512 
513   // The access check is unnecessary but we do not want to introduce
514   // extra entrypoints for the codegens that do not support some
515   // invoke type and fall back to the runtime call.
516 
  // Initialize to anything to silence compiler warnings.
518   QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
519   switch (invoke->GetInvokeType()) {
520     case kStatic:
521       entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
522       break;
523     case kDirect:
524       entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
525       break;
526     case kSuper:
527       entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
528       break;
529     case kVirtual:
530     case kInterface:
531     case kPolymorphic:
532       LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
533       UNREACHABLE();
534   }
535 
536   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
537 }
void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
539   MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());
540 
  // Initialize to anything to silence compiler warnings.
542   QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
543   switch (invoke->GetInvokeType()) {
544     case kStatic:
545       entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
546       break;
547     case kDirect:
548       entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
549       break;
550     case kVirtual:
551       entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
552       break;
553     case kSuper:
554       entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
555       break;
556     case kInterface:
557       entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
558       break;
559     case kPolymorphic:
560       LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
561       UNREACHABLE();
562   }
563   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
564 }
565 
void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
567   MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
568   QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
569   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
570 }
571 
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
573     HInstruction* field_access,
574     DataType::Type field_type,
575     const FieldAccessCallingConvention& calling_convention) {
576   bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
577       || field_access->IsUnresolvedInstanceFieldSet();
578   bool is_get = field_access->IsUnresolvedInstanceFieldGet()
579       || field_access->IsUnresolvedStaticFieldGet();
580 
581   ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
582   LocationSummary* locations =
583       new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
584 
585   locations->AddTemp(calling_convention.GetFieldIndexLocation());
586 
587   if (is_instance) {
588     // Add the `this` object for instance field accesses.
589     locations->SetInAt(0, calling_convention.GetObjectLocation());
590   }
591 
  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // the access to floating point values.
595   if (is_get) {
596     if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the register
      // allocator expects it in a floating point register.
      // Note: We don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or field index.
602       // The transfer between the two will be done at codegen level.
603       locations->SetOut(calling_convention.GetFpuLocation(field_type));
604     } else {
605       locations->SetOut(calling_convention.GetReturnLocation(field_type));
606     }
607   } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
610       // The set value comes from a float location while the calling convention
611       // expects it in a regular register location. Allocate a temp for it and
612       // make the transfer at codegen.
613       AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
614       locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
615     } else {
616       locations->SetInAt(set_index,
617           calling_convention.GetSetValueLocation(field_type, is_instance));
618     }
619   }
620 }
621 
void CodeGenerator::GenerateUnresolvedFieldAccess(
623     HInstruction* field_access,
624     DataType::Type field_type,
625     uint32_t field_index,
626     uint32_t dex_pc,
627     const FieldAccessCallingConvention& calling_convention) {
628   LocationSummary* locations = field_access->GetLocations();
629 
630   MoveConstant(locations->GetTemp(0), field_index);
631 
632   bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
633       || field_access->IsUnresolvedInstanceFieldSet();
634   bool is_get = field_access->IsUnresolvedInstanceFieldGet()
635       || field_access->IsUnresolvedStaticFieldGet();
636 
637   if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
642     MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
643                  locations->InAt(is_instance ? 1 : 0),
644                  (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
645                                                     : DataType::Type::kInt32));
646   }
647 
648   QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
649   switch (field_type) {
650     case DataType::Type::kBool:
651       entrypoint = is_instance
652           ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
653           : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
654       break;
655     case DataType::Type::kInt8:
656       entrypoint = is_instance
657           ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
658           : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
659       break;
660     case DataType::Type::kInt16:
661       entrypoint = is_instance
662           ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
663           : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
664       break;
665     case DataType::Type::kUint16:
666       entrypoint = is_instance
667           ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
668           : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
669       break;
670     case DataType::Type::kInt32:
671     case DataType::Type::kFloat32:
672       entrypoint = is_instance
673           ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
674           : (is_get ? kQuickGet32Static : kQuickSet32Static);
675       break;
676     case DataType::Type::kReference:
677       entrypoint = is_instance
678           ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
679           : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
680       break;
681     case DataType::Type::kInt64:
682     case DataType::Type::kFloat64:
683       entrypoint = is_instance
684           ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
685           : (is_get ? kQuickGet64Static : kQuickSet64Static);
686       break;
687     default:
688       LOG(FATAL) << "Invalid type " << field_type;
689   }
690   InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
691 
692   if (is_get && DataType::IsFloatingPointType(field_type)) {
693     MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
694   }
695 }
696 
void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
698                                                               Location runtime_type_index_location,
699                                                               Location runtime_return_location) {
700   DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
701   DCHECK_EQ(cls->InputCount(), 1u);
702   LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
703       cls, LocationSummary::kCallOnMainOnly);
704   locations->SetInAt(0, Location::NoLocation());
705   locations->AddTemp(runtime_type_index_location);
706   locations->SetOut(runtime_return_location);
707 }
708 
void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
710   DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
711   LocationSummary* locations = cls->GetLocations();
712   MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
713   if (cls->NeedsAccessCheck()) {
714     CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
715     InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
716   } else if (cls->MustGenerateClinitCheck()) {
717     CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
718     InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
719   } else {
720     CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
721     InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
722   }
723 }
724 
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
726   // The DCHECKS below check that a register is not specified twice in
727   // the summary. The out location can overlap with an input, so we need
728   // to special case it.
729   if (location.IsRegister()) {
730     DCHECK(is_out || !blocked_core_registers_[location.reg()]);
731     blocked_core_registers_[location.reg()] = true;
732   } else if (location.IsFpuRegister()) {
733     DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
734     blocked_fpu_registers_[location.reg()] = true;
735   } else if (location.IsFpuRegisterPair()) {
736     DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
737     blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
738     DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
739     blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
740   } else if (location.IsRegisterPair()) {
741     DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
742     blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
743     DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
744     blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
745   }
746 }
747 
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
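  // Allocate locations for this instruction's environment chain (including any
  // inlined parent environments) before visiting the instruction itself.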
749   for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
750     env->AllocateLocations();
751   }
752   instruction->Accept(GetLocationBuilder());
753   DCHECK(CheckTypeConsistency(instruction));
754   LocationSummary* locations = instruction->GetLocations();
755   if (!instruction->IsSuspendCheckEntry()) {
756     if (locations != nullptr) {
757       if (locations->CanCall()) {
758         MarkNotLeaf();
759       } else if (locations->Intrinsified() &&
760                  instruction->IsInvokeStaticOrDirect() &&
761                  !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
764         return;
765       }
766     }
767     if (instruction->NeedsCurrentMethod()) {
768       SetRequiresCurrentMethod();
769     }
770   }
771 }
772 
std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
774                                                      InstructionSet instruction_set,
775                                                      const InstructionSetFeatures& isa_features,
776                                                      const CompilerOptions& compiler_options,
777                                                      OptimizingCompilerStats* stats) {
778   ArenaAllocator* allocator = graph->GetAllocator();
779   switch (instruction_set) {
780 #ifdef ART_ENABLE_CODEGEN_arm
781     case InstructionSet::kArm:
782     case InstructionSet::kThumb2: {
783       return std::unique_ptr<CodeGenerator>(
784           new (allocator) arm::CodeGeneratorARMVIXL(
785               graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
786     }
787 #endif
788 #ifdef ART_ENABLE_CODEGEN_arm64
789     case InstructionSet::kArm64: {
790       return std::unique_ptr<CodeGenerator>(
791           new (allocator) arm64::CodeGeneratorARM64(
792               graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
793     }
794 #endif
795 #ifdef ART_ENABLE_CODEGEN_mips
796     case InstructionSet::kMips: {
797       return std::unique_ptr<CodeGenerator>(
798           new (allocator) mips::CodeGeneratorMIPS(
799               graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
800     }
801 #endif
802 #ifdef ART_ENABLE_CODEGEN_mips64
803     case InstructionSet::kMips64: {
804       return std::unique_ptr<CodeGenerator>(
805           new (allocator) mips64::CodeGeneratorMIPS64(
806               graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
807     }
808 #endif
809 #ifdef ART_ENABLE_CODEGEN_x86
810     case InstructionSet::kX86: {
811       return std::unique_ptr<CodeGenerator>(
812           new (allocator) x86::CodeGeneratorX86(
813               graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
814     }
815 #endif
816 #ifdef ART_ENABLE_CODEGEN_x86_64
817     case InstructionSet::kX86_64: {
818       return std::unique_ptr<CodeGenerator>(
819           new (allocator) x86_64::CodeGeneratorX86_64(
820               graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
821     }
822 #endif
823     default:
824       return nullptr;
825   }
826 }
827 
CodeGenerator::CodeGenerator(HGraph* graph,
829                              size_t number_of_core_registers,
830                              size_t number_of_fpu_registers,
831                              size_t number_of_register_pairs,
832                              uint32_t core_callee_save_mask,
833                              uint32_t fpu_callee_save_mask,
834                              const CompilerOptions& compiler_options,
835                              OptimizingCompilerStats* stats)
836     : frame_size_(0),
837       core_spill_mask_(0),
838       fpu_spill_mask_(0),
839       first_register_slot_in_slow_path_(0),
840       allocated_registers_(RegisterSet::Empty()),
841       blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
842                                                                       kArenaAllocCodeGenerator)),
843       blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
844                                                                      kArenaAllocCodeGenerator)),
845       number_of_core_registers_(number_of_core_registers),
846       number_of_fpu_registers_(number_of_fpu_registers),
847       number_of_register_pairs_(number_of_register_pairs),
848       core_callee_save_mask_(core_callee_save_mask),
849       fpu_callee_save_mask_(fpu_callee_save_mask),
850       block_order_(nullptr),
851       disasm_info_(nullptr),
852       stats_(stats),
853       graph_(graph),
854       compiler_options_(compiler_options),
855       current_slow_path_(nullptr),
856       current_block_index_(0),
857       is_leaf_(true),
858       requires_current_method_(false),
859       code_generation_data_() {
860 }
861 
CodeGenerator::~CodeGenerator() {}
863 
void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
865                                                      size_t* method_info_size) {
866   DCHECK(stack_map_size != nullptr);
867   DCHECK(method_info_size != nullptr);
868   StackMapStream* stack_map_stream = GetStackMapStream();
869   *stack_map_size = stack_map_stream->PrepareForFillIn();
870   *method_info_size = stack_map_stream->ComputeMethodInfoSize();
871 }
872 
size_t CodeGenerator::GetNumberOfJitRoots() const {
874   DCHECK(code_generation_data_ != nullptr);
875   return code_generation_data_->GetNumberOfJitRoots();
876 }
877 
static void CheckCovers(uint32_t dex_pc,
879                         const HGraph& graph,
880                         const CodeInfo& code_info,
881                         const ArenaVector<HSuspendCheck*>& loop_headers,
882                         ArenaVector<size_t>* covered) {
883   CodeInfoEncoding encoding = code_info.ExtractEncoding();
884   for (size_t i = 0; i < loop_headers.size(); ++i) {
885     if (loop_headers[i]->GetDexPc() == dex_pc) {
886       if (graph.IsCompilingOsr()) {
887         DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
888       }
889       ++(*covered)[i];
890     }
891   }
892 }
893 
894 // Debug helper to ensure loop entries in compiled code are matched by
895 // dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
897                                             const CodeInfo& code_info,
898                                             const DexFile::CodeItem& code_item) {
899   if (graph.HasTryCatch()) {
900     // One can write loops through try/catch, which we do not support for OSR anyway.
901     return;
902   }
903   ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
904   for (HBasicBlock* block : graph.GetReversePostOrder()) {
905     if (block->IsLoopHeader()) {
906       HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
907       if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
908         loop_headers.push_back(suspend_check);
909       }
910     }
911   }
912   ArenaVector<size_t> covered(
913       loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
914   for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
915                                                                       &code_item)) {
916     const uint32_t dex_pc = pair.DexPc();
917     const Instruction& instruction = pair.Inst();
918     if (instruction.IsBranch()) {
919       uint32_t target = dex_pc + instruction.GetTargetOffset();
920       CheckCovers(target, graph, code_info, loop_headers, &covered);
921     } else if (instruction.IsSwitch()) {
922       DexSwitchTable table(instruction, dex_pc);
923       uint16_t num_entries = table.GetNumEntries();
924       size_t offset = table.GetFirstValueIndex();
925 
926       // Use a larger loop counter type to avoid overflow issues.
927       for (size_t i = 0; i < num_entries; ++i) {
928         // The target of the case.
929         uint32_t target = dex_pc + table.GetEntryAt(i + offset);
930         CheckCovers(target, graph, code_info, loop_headers, &covered);
931       }
932     }
933   }
934 
935   for (size_t i = 0; i < covered.size(); ++i) {
936     DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
937   }
938 }
939 
void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
941                                    MemoryRegion method_info_region,
942                                    const DexFile::CodeItem* code_item_for_osr_check) {
943   StackMapStream* stack_map_stream = GetStackMapStream();
944   stack_map_stream->FillInCodeInfo(stack_map_region);
945   stack_map_stream->FillInMethodInfo(method_info_region);
946   if (kIsDebugBuild && code_item_for_osr_check != nullptr) {
947     CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), *code_item_for_osr_check);
948   }
949 }
950 
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
952                                  uint32_t dex_pc,
953                                  SlowPathCode* slow_path) {
954   if (instruction != nullptr) {
955     // The code generated for some type conversions
956     // may call the runtime, thus normally requiring a subsequent
957     // call to this method. However, the method verifier does not
958     // produce PC information for certain instructions, which are
959     // considered "atomic" (they cannot join a GC).
960     // Therefore we do not currently record PC information for such
961     // instructions.  As this may change later, we added this special
962     // case so that code generators may nevertheless call
963     // CodeGenerator::RecordPcInfo without triggering an error in
964     // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
965     // thereafter.
966     if (instruction->IsTypeConversion()) {
967       return;
968     }
969     if (instruction->IsRem()) {
970       DataType::Type type = instruction->AsRem()->GetResultType();
971       if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
972         return;
973       }
974     }
975   }
976 
977   // Collect PC infos for the mapping table.
978   uint32_t native_pc = GetAssembler()->CodePosition();
979 
980   StackMapStream* stack_map_stream = GetStackMapStream();
981   if (instruction == nullptr) {
982     // For stack overflow checks and native-debug-info entries without dex register
983     // mapping (i.e. start of basic block or start of slow path).
984     stack_map_stream->BeginStackMapEntry(dex_pc, native_pc, 0, 0, 0, 0);
985     stack_map_stream->EndStackMapEntry();
986     return;
987   }
988 
989   LocationSummary* locations = instruction->GetLocations();
990   uint32_t register_mask = locations->GetRegisterMask();
991   DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
992   if (locations->OnlyCallsOnSlowPath()) {
993     // In case of slow path, we currently set the location of caller-save registers
994     // to register (instead of their stack location when pushed before the slow-path
995     // call). Therefore register_mask contains both callee-save and caller-save
996     // registers that hold objects. We must remove the spilled caller-save from the
997     // mask, since they will be overwritten by the callee.
998     uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
999     register_mask &= ~spills;
1000   } else {
1001     // The register mask must be a subset of callee-save registers.
1002     DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
1003   }
1004 
1005   uint32_t outer_dex_pc = dex_pc;
1006   uint32_t outer_environment_size = 0u;
1007   uint32_t inlining_depth = 0;
1008   HEnvironment* const environment = instruction->GetEnvironment();
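  // Walk up to the outermost environment to find the dex pc and environment size
  // of the outer method, counting the inlining depth along the way.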
1009   if (environment != nullptr) {
1010     HEnvironment* outer_environment = environment;
1011     while (outer_environment->GetParent() != nullptr) {
1012       outer_environment = outer_environment->GetParent();
1013       ++inlining_depth;
1014     }
1015     outer_dex_pc = outer_environment->GetDexPc();
1016     outer_environment_size = outer_environment->Size();
1017   }
1018   stack_map_stream->BeginStackMapEntry(outer_dex_pc,
1019                                        native_pc,
1020                                        register_mask,
1021                                        locations->GetStackMask(),
1022                                        outer_environment_size,
1023                                        inlining_depth);
1024   EmitEnvironment(environment, slow_path);
1025   // Record invoke info, the common case for the trampoline is super and static invokes. Only
1026   // record these to reduce oat file size.
1027   if (kEnableDexLayoutOptimizations) {
1028     if (instruction->IsInvokeStaticOrDirect()) {
1029       HInvoke* const invoke = instruction->AsInvokeStaticOrDirect();
1030       DCHECK(environment != nullptr);
1031       stack_map_stream->AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
1032     }
1033   }
1034   stack_map_stream->EndStackMapEntry();
1035 
1036   HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
1037   if (instruction->IsSuspendCheck() &&
1038       (info != nullptr) &&
1039       graph_->IsCompilingOsr() &&
1040       (inlining_depth == 0)) {
1041     DCHECK_EQ(info->GetSuspendCheck(), instruction);
1042     // We duplicate the stack map as a marker that this stack map can be an OSR entry.
1043     // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
1044     DCHECK(info->IsIrreducible());
1045     stack_map_stream->BeginStackMapEntry(
1046         dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
1047     EmitEnvironment(instruction->GetEnvironment(), slow_path);
1048     stack_map_stream->EndStackMapEntry();
1049     if (kIsDebugBuild) {
1050       for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
1051         HInstruction* in_environment = environment->GetInstructionAt(i);
1052         if (in_environment != nullptr) {
1053           DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
1054           Location location = environment->GetLocationAt(i);
1055           DCHECK(location.IsStackSlot() ||
1056                  location.IsDoubleStackSlot() ||
1057                  location.IsConstant() ||
1058                  location.IsInvalid());
1059           if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
1060             DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
1061           }
1062         }
1063       }
1064     }
1065   } else if (kIsDebugBuild) {
1066     // Ensure stack maps are unique, by checking that the native pc in the stack map
1067     // last emitted is different than the native pc of the stack map just emitted.
1068     size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
1069     if (number_of_stack_maps > 1) {
1070       DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
1071                 stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
1072     }
1073   }
1074 }
1075 
bool CodeGenerator::HasStackMapAtCurrentPc() {
1077   uint32_t pc = GetAssembler()->CodeSize();
1078   StackMapStream* stack_map_stream = GetStackMapStream();
1079   size_t count = stack_map_stream->GetNumberOfStackMaps();
1080   if (count == 0) {
1081     return false;
1082   }
1083   CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset;
1084   return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
1085 }
1086 
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
1088                                                uint32_t dex_pc,
1089                                                SlowPathCode* slow_path) {
1090   if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
1091     if (HasStackMapAtCurrentPc()) {
1092       // Ensure that we do not collide with the stack map of the previous instruction.
1093       GenerateNop();
1094     }
1095     RecordPcInfo(instruction, dex_pc, slow_path);
1096   }
1097 }
1098 
void CodeGenerator::RecordCatchBlockInfo() {
1100   ArenaAllocator* allocator = graph_->GetAllocator();
1101   StackMapStream* stack_map_stream = GetStackMapStream();
1102 
1103   for (HBasicBlock* block : *block_order_) {
1104     if (!block->IsCatchBlock()) {
1105       continue;
1106     }
1107 
1108     uint32_t dex_pc = block->GetDexPc();
1109     uint32_t num_vregs = graph_->GetNumberOfVRegs();
1110     uint32_t inlining_depth = 0;  // Inlining of catch blocks is not supported at the moment.
1111     uint32_t native_pc = GetAddressOf(block);
1112     uint32_t register_mask = 0;   // Not used.
1113 
1114     // The stack mask is not used, so we leave it empty.
1115     ArenaBitVector* stack_mask =
1116         ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
1117 
1118     stack_map_stream->BeginStackMapEntry(dex_pc,
1119                                          native_pc,
1120                                          register_mask,
1121                                          stack_mask,
1122                                          num_vregs,
1123                                          inlining_depth);
1124 
1125     HInstruction* current_phi = block->GetFirstPhi();
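    // Walk the catch phis (sorted by vreg number) in lockstep with the vreg
    // numbers; vregs without a matching catch phi are recorded as kNone.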
1126     for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
      while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
        HInstruction* next_phi = current_phi->GetNext();
        DCHECK(next_phi == nullptr ||
               current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
            << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
        current_phi = next_phi;
      }
1134 
1135       if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
1136         stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1137       } else {
1138         Location location = current_phi->GetLocations()->Out();
1139         switch (location.GetKind()) {
1140           case Location::kStackSlot: {
1141             stack_map_stream->AddDexRegisterEntry(
1142                 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1143             break;
1144           }
1145           case Location::kDoubleStackSlot: {
1146             stack_map_stream->AddDexRegisterEntry(
1147                 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1148             stack_map_stream->AddDexRegisterEntry(
1149                 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1150             ++vreg;
1151             DCHECK_LT(vreg, num_vregs);
1152             break;
1153           }
1154           default: {
1155             // All catch phis must be allocated to a stack slot.
1156             LOG(FATAL) << "Unexpected kind " << location.GetKind();
1157             UNREACHABLE();
1158           }
1159         }
1160       }
1161     }
1162 
1163     stack_map_stream->EndStackMapEntry();
1164   }
1165 }
1166 
void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
1168   DCHECK(code_generation_data_ != nullptr);
1169   code_generation_data_->AddSlowPath(slow_path);
1170 }
1171 
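// Record the locations of the dex registers of `environment` in the current stack map entry,
// recursing into the parent environment first so that inlined frames get their inline info.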
void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (environment->GetParent() != nullptr) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(), slow_path);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           environment->Size(),
                                           &graph_->GetDexFile());
  }

  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
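        // 64-bit constants span two vregs and are recorded as two 32-bit entries (low, then high).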
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(
              DexRegisterLocation::Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        int id = location.reg();
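        // If the slow path has spilled this register, report the spill slot instead, so the
        // value remains visible to the runtime while the slow path executes.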
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister: {
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(
                DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }

  if (environment->GetParent() != nullptr) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

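// Return whether the explicit null check can be folded into its first non-move user, i.e.
// whether that instruction can fault (and thus perform the check implicitly) on the same input.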
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();

  return (first_next_not_move != nullptr)
      && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  if (!compiler_options_.GetImplicitNullChecks()) {
    return;
  }

  // If we come from a static path, do not record the pc, as we cannot throw an NPE.
  // NB: having the checks here makes the code much less verbose in the arch
  // specific code generators.
  if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
    return;
  }

  if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
    return;
  }

  // Find the first previous instruction which is not a move.
  HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();

  // If the instruction is a null check it means that `instr` is the first user
  // and needs to record the pc.
  if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
    HNullCheck* null_check = first_prev_not_move->AsNullCheck();
    // TODO: The parallel moves modify the environment. Their changes need to be
    // reverted otherwise the stack maps at the throw point will not be correct.
    RecordPcInfo(null_check, null_check->GetDexPc());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as leaf (and eliminates the
  // HSuspendCheck from the entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers, and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

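// Clear the reference bits for the loop-phi spill slots in the suspend check's stack map,
// so the GC does not treat those (possibly not-yet-written) slots as live references.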
void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check if the spill is
    // actually a reference spill, clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

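// Emit two moves as a single parallel move, so the move resolver can correctly handle any
// overlap between sources and destinations (e.g. a swap).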
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (kEmitCompilerReadBarrier &&
              !kUseBakerReadBarrier &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might still have
    // it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
         (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

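// Spill the live core and floating-point registers on slow path entry, recording each
// register's stack offset and marking object-holding slots in the stack mask for the GC.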
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
  // Check to see if we have known failures that will cause us to have to bail out
  // to the runtime, and just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return;
  }

  // The length must be >= 0.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0) {
      // Just call as normal.
      return;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
  locations->AddTemp(Location::RequiresRegister());
}

void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 Handle<mirror::ObjectArray<mirror::Object>> roots,
                                 const uint8_t* roots_data) {
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

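// Select the array allocation entrypoint from the component size of the resolved array class;
// an unresolved class can only be a reference array, so the 32-bit component entrypoint is used.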
QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
  ScopedObjectAccess soa(Thread::Current());
  if (array_klass == nullptr) {
    // This can only happen for non-primitive arrays, as primitive arrays can always
    // be resolved.
    return kQuickAllocArrayResolved32;
  }

  switch (array_klass->GetComponentSize()) {
    case 1: return kQuickAllocArrayResolved8;
    case 2: return kQuickAllocArrayResolved16;
    case 4: return kQuickAllocArrayResolved32;
    case 8: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  return kQuickAllocArrayResolved;
}

}  // namespace art