1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator.h"
18
19 #ifdef ART_ENABLE_CODEGEN_arm
20 #include "code_generator_arm_vixl.h"
21 #endif
22
23 #ifdef ART_ENABLE_CODEGEN_arm64
24 #include "code_generator_arm64.h"
25 #endif
26
27 #ifdef ART_ENABLE_CODEGEN_x86
28 #include "code_generator_x86.h"
29 #endif
30
31 #ifdef ART_ENABLE_CODEGEN_x86_64
32 #include "code_generator_x86_64.h"
33 #endif
34
35 #include "art_method-inl.h"
36 #include "base/bit_utils.h"
37 #include "base/bit_utils_iterator.h"
38 #include "base/casts.h"
39 #include "base/leb128.h"
40 #include "class_linker.h"
41 #include "class_root-inl.h"
42 #include "compiled_method.h"
43 #include "dex/bytecode_utils.h"
44 #include "dex/code_item_accessors-inl.h"
45 #include "dex/verified_method.h"
46 #include "graph_visualizer.h"
47 #include "image.h"
48 #include "gc/space/image_space.h"
49 #include "intern_table.h"
50 #include "intrinsics.h"
51 #include "mirror/array-inl.h"
52 #include "mirror/object_array-inl.h"
53 #include "mirror/object_reference.h"
54 #include "mirror/reference.h"
55 #include "mirror/string.h"
56 #include "parallel_move_resolver.h"
57 #include "scoped_thread_state_change-inl.h"
58 #include "ssa_liveness_analysis.h"
59 #include "stack_map.h"
60 #include "stack_map_stream.h"
61 #include "string_builder_append.h"
62 #include "thread-current-inl.h"
63 #include "utils/assembler.h"
64
65 namespace art {
66
67 // Return whether a location is consistent with a type.
68 static bool CheckType(DataType::Type type, Location location) {
69 if (location.IsFpuRegister()
70 || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
71 return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
72 } else if (location.IsRegister() ||
73 (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
74 return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
75 } else if (location.IsRegisterPair()) {
76 return type == DataType::Type::kInt64;
77 } else if (location.IsFpuRegisterPair()) {
78 return type == DataType::Type::kFloat64;
79 } else if (location.IsStackSlot()) {
80 return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
81 || (type == DataType::Type::kFloat32)
82 || (type == DataType::Type::kReference);
83 } else if (location.IsDoubleStackSlot()) {
84 return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
85 } else if (location.IsConstant()) {
86 if (location.GetConstant()->IsIntConstant()) {
87 return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
88 } else if (location.GetConstant()->IsNullConstant()) {
89 return type == DataType::Type::kReference;
90 } else if (location.GetConstant()->IsLongConstant()) {
91 return type == DataType::Type::kInt64;
92 } else if (location.GetConstant()->IsFloatConstant()) {
93 return type == DataType::Type::kFloat32;
94 } else {
95 return location.GetConstant()->IsDoubleConstant()
96 && (type == DataType::Type::kFloat64);
97 }
98 } else {
99 return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
100 }
101 }
102
103 // Check that a location summary is consistent with an instruction.
104 static bool CheckTypeConsistency(HInstruction* instruction) {
105 LocationSummary* locations = instruction->GetLocations();
106 if (locations == nullptr) {
107 return true;
108 }
109
110 if (locations->Out().IsUnallocated()
111 && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
112 DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
113 << instruction->GetType()
114 << " " << locations->InAt(0);
115 } else {
116 DCHECK(CheckType(instruction->GetType(), locations->Out()))
117 << instruction->GetType()
118 << " " << locations->Out();
119 }
120
121 HConstInputsRef inputs = instruction->GetInputs();
122 for (size_t i = 0; i < inputs.size(); ++i) {
123 DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
124 << inputs[i]->GetType() << " " << locations->InAt(i);
125 }
126
127 HEnvironment* environment = instruction->GetEnvironment();
128 for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
129 if (environment->GetInstructionAt(i) != nullptr) {
130 DataType::Type type = environment->GetInstructionAt(i)->GetType();
131 DCHECK(CheckType(type, environment->GetLocationAt(i)))
132 << type << " " << environment->GetLocationAt(i);
133 } else {
134 DCHECK(environment->GetLocationAt(i).IsInvalid())
135 << environment->GetLocationAt(i);
136 }
137 }
138 return true;
139 }
140
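// Per-method scratch data used only while generating code: the scoped arena allocator,
// the stack map stream, the collected slow paths, and the reserved JIT roots.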
141 class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllocCodeGenerator> {
142 public:
143 static std::unique_ptr<CodeGenerationData> Create(ArenaStack* arena_stack,
144 InstructionSet instruction_set) {
145 ScopedArenaAllocator allocator(arena_stack);
146 void* memory = allocator.Alloc<CodeGenerationData>(kArenaAllocCodeGenerator);
147 return std::unique_ptr<CodeGenerationData>(
148 ::new (memory) CodeGenerationData(std::move(allocator), instruction_set));
149 }
150
151 ScopedArenaAllocator* GetScopedAllocator() {
152 return &allocator_;
153 }
154
155 void AddSlowPath(SlowPathCode* slow_path) {
156 slow_paths_.emplace_back(std::unique_ptr<SlowPathCode>(slow_path));
157 }
158
159 ArrayRef<const std::unique_ptr<SlowPathCode>> GetSlowPaths() const {
160 return ArrayRef<const std::unique_ptr<SlowPathCode>>(slow_paths_);
161 }
162
163 StackMapStream* GetStackMapStream() { return &stack_map_stream_; }
164
165 void ReserveJitStringRoot(StringReference string_reference, Handle<mirror::String> string) {
166 jit_string_roots_.Overwrite(string_reference,
167 reinterpret_cast64<uint64_t>(string.GetReference()));
168 }
169
170 uint64_t GetJitStringRootIndex(StringReference string_reference) const {
171 return jit_string_roots_.Get(string_reference);
172 }
173
174 size_t GetNumberOfJitStringRoots() const {
175 return jit_string_roots_.size();
176 }
177
178 void ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
179 jit_class_roots_.Overwrite(type_reference, reinterpret_cast64<uint64_t>(klass.GetReference()));
180 }
181
182 uint64_t GetJitClassRootIndex(TypeReference type_reference) const {
183 return jit_class_roots_.Get(type_reference);
184 }
185
186 size_t GetNumberOfJitClassRoots() const {
187 return jit_class_roots_.size();
188 }
189
190 size_t GetNumberOfJitRoots() const {
191 return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
192 }
193
194 void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
195 REQUIRES_SHARED(Locks::mutator_lock_);
196
197 private:
198 CodeGenerationData(ScopedArenaAllocator&& allocator, InstructionSet instruction_set)
199 : allocator_(std::move(allocator)),
200 stack_map_stream_(&allocator_, instruction_set),
201 slow_paths_(allocator_.Adapter(kArenaAllocCodeGenerator)),
202 jit_string_roots_(StringReferenceValueComparator(),
203 allocator_.Adapter(kArenaAllocCodeGenerator)),
204 jit_class_roots_(TypeReferenceValueComparator(),
205 allocator_.Adapter(kArenaAllocCodeGenerator)) {
206 slow_paths_.reserve(kDefaultSlowPathsCapacity);
207 }
208
209 static constexpr size_t kDefaultSlowPathsCapacity = 8;
210
211 ScopedArenaAllocator allocator_;
212 StackMapStream stack_map_stream_;
213 ScopedArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
214
215 // Maps a StringReference (dex_file, string_index) to the index in the literal table.
216 // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
217 // will compute all the indices.
218 ScopedArenaSafeMap<StringReference, uint64_t, StringReferenceValueComparator> jit_string_roots_;
219
220 // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
221 // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
222 // will compute all the indices.
223 ScopedArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
224 };
225
226 void CodeGenerator::CodeGenerationData::EmitJitRoots(
227 /*out*/std::vector<Handle<mirror::Object>>* roots) {
228 DCHECK(roots->empty());
229 roots->reserve(GetNumberOfJitRoots());
230 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
231 size_t index = 0;
232 for (auto& entry : jit_string_roots_) {
233 // Update `roots` with the string, and replace the temporarily stored
234 // address with its index in the table.
235 uint64_t address = entry.second;
236 roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
237 DCHECK(roots->back() != nullptr);
238 DCHECK(roots->back()->IsString());
239 entry.second = index;
240 // Ensure the string is strongly interned. This is a requirement on how the JIT
241 // handles strings. b/32995596
242 class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
243 ++index;
244 }
245 for (auto& entry : jit_class_roots_) {
246 // Update `roots` with the class, and replace the temporarily stored
247 // address with its index in the table.
248 uint64_t address = entry.second;
249 roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
250 DCHECK(roots->back() != nullptr);
251 DCHECK(roots->back()->IsClass());
252 entry.second = index;
253 ++index;
254 }
255 }
256
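// The accessors below forward to the per-method CodeGenerationData, which must have been
// created via InitializeCodeGenerationData() before code generation starts.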
257 ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
258 DCHECK(code_generation_data_ != nullptr);
259 return code_generation_data_->GetScopedAllocator();
260 }
261
262 StackMapStream* CodeGenerator::GetStackMapStream() {
263 DCHECK(code_generation_data_ != nullptr);
264 return code_generation_data_->GetStackMapStream();
265 }
266
267 void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
268 Handle<mirror::String> string) {
269 DCHECK(code_generation_data_ != nullptr);
270 code_generation_data_->ReserveJitStringRoot(string_reference, string);
271 }
272
273 uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
274 DCHECK(code_generation_data_ != nullptr);
275 return code_generation_data_->GetJitStringRootIndex(string_reference);
276 }
277
278 void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
279 DCHECK(code_generation_data_ != nullptr);
280 code_generation_data_->ReserveJitClassRoot(type_reference, klass);
281 }
282
283 uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
284 DCHECK(code_generation_data_ != nullptr);
285 return code_generation_data_->GetJitClassRootIndex(type_reference);
286 }
287
288 void CodeGenerator::EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
289 const uint8_t* roots_data ATTRIBUTE_UNUSED) {
290 DCHECK(code_generation_data_ != nullptr);
291 DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
292 DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
293 }
294
295 uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
296 return array_length->IsStringLength()
297 ? mirror::String::CountOffset().Uint32Value()
298 : mirror::Array::LengthOffset().Uint32Value();
299 }
300
301 uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
302 DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
303 return array_get->IsStringCharAt()
304 ? mirror::String::ValueOffset().Uint32Value()
305 : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
306 }
307
308 bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
309 DCHECK_EQ((*block_order_)[current_block_index_], current);
310 return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
311 }
312
313 HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
314 for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
315 HBasicBlock* block = (*block_order_)[i];
316 if (!block->IsSingleJump()) {
317 return block;
318 }
319 }
320 return nullptr;
321 }
322
323 HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
324 while (block->IsSingleJump()) {
325 block = block->GetSuccessors()[0];
326 }
327 return block;
328 }
329
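// RAII helper that records the native code interval covered by a single HInstruction
// in the disassembly information, when disassembly output has been requested.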
330 class DisassemblyScope {
331 public:
332 DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
333 : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
334 if (codegen_.GetDisassemblyInformation() != nullptr) {
335 start_offset_ = codegen_.GetAssembler().CodeSize();
336 }
337 }
338
339 ~DisassemblyScope() {
340 // We avoid building this data when we know it will not be used.
341 if (codegen_.GetDisassemblyInformation() != nullptr) {
342 codegen_.GetDisassemblyInformation()->AddInstructionInterval(
343 instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
344 }
345 }
346
347 private:
348 const CodeGenerator& codegen_;
349 HInstruction* instruction_;
350 size_t start_offset_;
351 };
352
353
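// Emit the native code for all slow paths collected during code generation, recording
// native debug info at each slow path entry and, when requested, its disassembly interval.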
354 void CodeGenerator::GenerateSlowPaths() {
355 DCHECK(code_generation_data_ != nullptr);
356 size_t code_start = 0;
357 for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
358 SlowPathCode* slow_path = slow_path_ptr.get();
359 current_slow_path_ = slow_path;
360 if (disasm_info_ != nullptr) {
361 code_start = GetAssembler()->CodeSize();
362 }
363 // Record the dex pc at the start of the slow path (required for Java line number mapping).
364 MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
365 slow_path->EmitNativeCode(this);
366 if (disasm_info_ != nullptr) {
367 disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
368 }
369 }
370 current_slow_path_ = nullptr;
371 }
372
373 void CodeGenerator::InitializeCodeGenerationData() {
374 DCHECK(code_generation_data_ == nullptr);
375 code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
376 }
377
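// Top-level code generation driver: emits the frame entry, generates code for each block in
// emission order, then emits the slow paths and catch stack maps, and ends the stack map stream.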
378 void CodeGenerator::Compile(CodeAllocator* allocator) {
379 InitializeCodeGenerationData();
380
381 // The register allocator already called `InitializeCodeGeneration`,
382 // where the frame size has been computed.
383 DCHECK(block_order_ != nullptr);
384 Initialize();
385
386 HGraphVisitor* instruction_visitor = GetInstructionVisitor();
387 DCHECK_EQ(current_block_index_, 0u);
388
389 GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
390 core_spill_mask_,
391 fpu_spill_mask_,
392 GetGraph()->GetNumberOfVRegs(),
393 GetGraph()->IsCompilingBaseline());
394
395 size_t frame_start = GetAssembler()->CodeSize();
396 GenerateFrameEntry();
397 DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
398 if (disasm_info_ != nullptr) {
399 disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
400 }
401
402 for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
403 HBasicBlock* block = (*block_order_)[current_block_index_];
404 // Don't generate code for an empty block. Its predecessors will branch to its successor
405 // directly. Also, the label of that block will not be emitted, so this helps catch
406 // errors where we reference that label.
407 if (block->IsSingleJump()) continue;
408 Bind(block);
409 // This ensures that we have correct native line mapping for all native instructions.
410 // It is necessary to make stepping over a statement work. Otherwise, any initial
411 // instructions (e.g. moves) would be assumed to be the start of the next statement.
412 MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
413 for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
414 HInstruction* current = it.Current();
415 if (current->HasEnvironment()) {
416 // Create stackmap for HNativeDebugInfo or any instruction which calls native code.
417 // Note that we need correct mapping for the native PC of the call instruction,
418 // so the runtime's stackmap is not sufficient since it is at PC after the call.
419 MaybeRecordNativeDebugInfo(current, block->GetDexPc());
420 }
421 DisassemblyScope disassembly_scope(current, *this);
422 DCHECK(CheckTypeConsistency(current));
423 current->Accept(instruction_visitor);
424 }
425 }
426
427 GenerateSlowPaths();
428
429 // Emit catch stack maps at the end of the stack map stream as expected by the
430 // runtime exception handler.
431 if (graph_->HasTryCatch()) {
432 RecordCatchBlockInfo();
433 }
434
435 // Finalize instructions in the assembler.
436 Finalize(allocator);
437
438 GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
439 }
440
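// Copy the finalized instruction stream into a buffer obtained from the code allocator.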
441 void CodeGenerator::Finalize(CodeAllocator* allocator) {
442 size_t code_size = GetAssembler()->CodeSize();
443 uint8_t* buffer = allocator->Allocate(code_size);
444
445 MemoryRegion code(buffer, code_size);
446 GetAssembler()->FinalizeInstructions(code);
447 }
448
449 void CodeGenerator::EmitLinkerPatches(
450 ArenaVector<linker::LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
451 // No linker patches by default.
452 }
453
454 bool CodeGenerator::NeedsThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED) const {
455 // Code generators that create patches requiring thunk compilation should override this function.
456 return false;
457 }
458
459 void CodeGenerator::EmitThunkCode(const linker::LinkerPatch& patch ATTRIBUTE_UNUSED,
460 /*out*/ ArenaVector<uint8_t>* code ATTRIBUTE_UNUSED,
461 /*out*/ std::string* debug_name ATTRIBUTE_UNUSED) {
462 // Code generators that create patches requiring thunk compilation should override this function.
463 LOG(FATAL) << "Unexpected call to EmitThunkCode().";
464 }
465
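// Called once the register allocator knows the spill requirements: records the block order,
// computes the spill masks and derives the final frame size (an empty frame keeps only the
// pushed PC, if the architecture pushes one on calls).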
466 void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
467 size_t maximum_safepoint_spill_size,
468 size_t number_of_out_slots,
469 const ArenaVector<HBasicBlock*>& block_order) {
470 block_order_ = &block_order;
471 DCHECK(!block_order.empty());
472 DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
473 ComputeSpillMask();
474 first_register_slot_in_slow_path_ = RoundUp(
475 (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());
476
477 if (number_of_spill_slots == 0
478 && !HasAllocatedCalleeSaveRegisters()
479 && IsLeafMethod()
480 && !RequiresCurrentMethod()) {
481 DCHECK_EQ(maximum_safepoint_spill_size, 0u);
482 SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
483 } else {
484 SetFrameSize(RoundUp(
485 first_register_slot_in_slow_path_
486 + maximum_safepoint_spill_size
487 + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
488 + FrameEntrySpillSize(),
489 kStackAlignment));
490 }
491 }
492
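// Build the location summary for an invoke from the calling convention visitor: argument and
// return locations, plus the method register as an input or a temp depending on the method
// load kind and code pointer location.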
493 void CodeGenerator::CreateCommonInvokeLocationSummary(
494 HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
495 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
496 LocationSummary* locations = new (allocator) LocationSummary(invoke,
497 LocationSummary::kCallOnMainOnly);
498
499 for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
500 HInstruction* input = invoke->InputAt(i);
501 locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
502 }
503
504 locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
505
506 if (invoke->IsInvokeStaticOrDirect()) {
507 HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
508 MethodLoadKind method_load_kind = call->GetMethodLoadKind();
509 CodePtrLocation code_ptr_location = call->GetCodePtrLocation();
510 if (code_ptr_location == CodePtrLocation::kCallCriticalNative) {
511 locations->AddTemp(Location::RequiresRegister()); // For target method.
512 }
513 if (code_ptr_location == CodePtrLocation::kCallCriticalNative ||
514 method_load_kind == MethodLoadKind::kRecursive) {
515 // For `kCallCriticalNative` we need the current method as the hidden argument
516 // if we reach the dlsym lookup stub for @CriticalNative.
517 locations->SetInAt(call->GetCurrentMethodIndex(), visitor->GetMethodLocation());
518 } else {
519 locations->AddTemp(visitor->GetMethodLocation());
520 if (method_load_kind == MethodLoadKind::kRuntimeCall) {
521 locations->SetInAt(call->GetCurrentMethodIndex(), Location::RequiresRegister());
522 }
523 }
524 } else if (!invoke->IsInvokePolymorphic()) {
525 locations->AddTemp(visitor->GetMethodLocation());
526 }
527 }
528
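// For @CriticalNative calls, gather the moves of stack-passed arguments into `parallel_move`;
// register arguments are expected to already sit in their final locations.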
529 void CodeGenerator::PrepareCriticalNativeArgumentMoves(
530 HInvokeStaticOrDirect* invoke,
531 /*inout*/InvokeDexCallingConventionVisitor* visitor,
532 /*out*/HParallelMove* parallel_move) {
533 LocationSummary* locations = invoke->GetLocations();
534 for (size_t i = 0, num = invoke->GetNumberOfArguments(); i != num; ++i) {
535 Location in_location = locations->InAt(i);
536 DataType::Type type = invoke->InputAt(i)->GetType();
537 DCHECK_NE(type, DataType::Type::kReference);
538 Location out_location = visitor->GetNextLocation(type);
539 if (out_location.IsStackSlot() || out_location.IsDoubleStackSlot()) {
540 // Stack arguments will need to be moved after adjusting the SP.
541 parallel_move->AddMove(in_location, out_location, type, /*instruction=*/ nullptr);
542 } else {
543 // Register arguments should have been assigned their final locations for register allocation.
544 DCHECK(out_location.Equals(in_location)) << in_location << " -> " << out_location;
545 }
546 }
547 }
548
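// Grow the frame for the outgoing stack arguments, rebase the stack-slot sources of the
// pending moves by that amount, and emit the moves through the parallel move resolver.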
549 void CodeGenerator::FinishCriticalNativeFrameSetup(size_t out_frame_size,
550 /*inout*/HParallelMove* parallel_move) {
551 DCHECK_NE(out_frame_size, 0u);
552 IncreaseFrame(out_frame_size);
553 // Adjust the source stack offsets by `out_frame_size`, i.e. the additional
554 // frame size needed for outgoing stack arguments.
555 for (size_t i = 0, num = parallel_move->NumMoves(); i != num; ++i) {
556 MoveOperands* operands = parallel_move->MoveOperandsAt(i);
557 Location source = operands->GetSource();
558 if (operands->GetSource().IsStackSlot()) {
559 operands->SetSource(Location::StackSlot(source.GetStackIndex() + out_frame_size));
560 } else if (operands->GetSource().IsDoubleStackSlot()) {
561 operands->SetSource(Location::DoubleStackSlot(source.GetStackIndex() + out_frame_size));
562 }
563 }
564 // Emit the moves.
565 GetMoveResolver()->EmitNativeCode(parallel_move);
566 }
567
568 const char* CodeGenerator::GetCriticalNativeShorty(HInvokeStaticOrDirect* invoke,
569 uint32_t* shorty_len) {
570 ScopedObjectAccess soa(Thread::Current());
571 DCHECK(invoke->GetResolvedMethod()->IsCriticalNative());
572 return invoke->GetResolvedMethod()->GetShorty(shorty_len);
573 }
574
575 void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
576 HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
577 MethodReference method_reference(invoke->GetMethodReference());
578 MoveConstant(temp, method_reference.index);
579
580 // The access check is unnecessary but we do not want to introduce
581 // extra entrypoints for the codegens that do not support some
582 // invoke type and fall back to the runtime call.
583
584 // Initialize to anything to silence compiler warnings.
585 QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
586 switch (invoke->GetInvokeType()) {
587 case kStatic:
588 entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
589 break;
590 case kDirect:
591 entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
592 break;
593 case kSuper:
594 entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
595 break;
596 case kVirtual:
597 case kInterface:
598 case kPolymorphic:
599 case kCustom:
600 LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
601 UNREACHABLE();
602 }
603
604 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
605 }
606 void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
607 MethodReference method_reference(invoke->GetMethodReference());
608 MoveConstant(invoke->GetLocations()->GetTemp(0), method_reference.index);
609
610 // Initialize to anything to silence compiler warnings.
611 QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
612 switch (invoke->GetInvokeType()) {
613 case kStatic:
614 entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
615 break;
616 case kDirect:
617 entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
618 break;
619 case kVirtual:
620 entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
621 break;
622 case kSuper:
623 entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
624 break;
625 case kInterface:
626 entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
627 break;
628 case kPolymorphic:
629 case kCustom:
630 LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
631 UNREACHABLE();
632 }
633 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
634 }
635
636 void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke,
637 SlowPathCode* slow_path) {
638 // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
639 // method index) since it requires multiple pieces of information from the instruction
640 // (registers A, B, H). Not using the reservation has no effect on the registers used in the runtime call.
641 QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
642 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
643 }
644
645 void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
646 MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
647 QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
648 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
649 }
650
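// Assign a stack slot input for each StringBuilder append argument encoded in the format
// word and update the graph's maximum number of out vregs accordingly.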
651 void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
652 Location out) {
653 ArenaAllocator* allocator = GetGraph()->GetAllocator();
654 LocationSummary* locations =
655 new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
656 locations->SetOut(out);
657 instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
658 Location::ConstantLocation(instruction->GetFormat()));
659
660 uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
661 uint32_t f = format;
662 PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
663 size_t stack_offset = static_cast<size_t>(pointer_size); // Start after the ArtMethod*.
664 for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
665 StringBuilderAppend::Argument arg_type =
666 static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
667 switch (arg_type) {
668 case StringBuilderAppend::Argument::kStringBuilder:
669 case StringBuilderAppend::Argument::kString:
670 case StringBuilderAppend::Argument::kCharArray:
671 static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
672 FALLTHROUGH_INTENDED;
673 case StringBuilderAppend::Argument::kBoolean:
674 case StringBuilderAppend::Argument::kChar:
675 case StringBuilderAppend::Argument::kInt:
676 case StringBuilderAppend::Argument::kFloat:
677 locations->SetInAt(i, Location::StackSlot(stack_offset));
678 break;
679 case StringBuilderAppend::Argument::kLong:
680 case StringBuilderAppend::Argument::kDouble:
681 stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
682 locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
683 // Skip the low word, let the common code skip the high word.
684 stack_offset += sizeof(uint32_t);
685 break;
686 default:
687 LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
688 << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
689 UNREACHABLE();
690 }
691 f >>= StringBuilderAppend::kBitsPerArg;
692 stack_offset += sizeof(uint32_t);
693 }
694 DCHECK_EQ(f, 0u);
695
696 size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
697 DCHECK_ALIGNED(param_size, kVRegSize);
698 size_t num_vregs = param_size / kVRegSize;
699 graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
700 }
701
702 void CodeGenerator::CreateUnresolvedFieldLocationSummary(
703 HInstruction* field_access,
704 DataType::Type field_type,
705 const FieldAccessCallingConvention& calling_convention) {
706 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
707 || field_access->IsUnresolvedInstanceFieldSet();
708 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
709 || field_access->IsUnresolvedStaticFieldGet();
710
711 ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
712 LocationSummary* locations =
713 new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
714
715 locations->AddTemp(calling_convention.GetFieldIndexLocation());
716
717 if (is_instance) {
718 // Add the `this` object for instance field accesses.
719 locations->SetInAt(0, calling_convention.GetObjectLocation());
720 }
721
722 // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
723 // regardless of the type. Because of that we are forced to special case
724 // the access to floating point values.
725 if (is_get) {
726 if (DataType::IsFloatingPointType(field_type)) {
727 // The return value will be stored in regular registers while the register
728 // allocator expects it in a floating point register.
729 // Note: We don't need to request additional temps because the return
730 // register(s) are already blocked due to the call and they may overlap with
731 // the input or field index.
732 // The transfer between the two will be done at codegen level.
733 locations->SetOut(calling_convention.GetFpuLocation(field_type));
734 } else {
735 locations->SetOut(calling_convention.GetReturnLocation(field_type));
736 }
737 } else {
738 size_t set_index = is_instance ? 1 : 0;
739 if (DataType::IsFloatingPointType(field_type)) {
740 // The set value comes from a float location while the calling convention
741 // expects it in a regular register location. Allocate a temp for it and
742 // make the transfer at codegen.
743 AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
744 locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
745 } else {
746 locations->SetInAt(set_index,
747 calling_convention.GetSetValueLocation(field_type, is_instance));
748 }
749 }
750 }
751
752 void CodeGenerator::GenerateUnresolvedFieldAccess(
753 HInstruction* field_access,
754 DataType::Type field_type,
755 uint32_t field_index,
756 uint32_t dex_pc,
757 const FieldAccessCallingConvention& calling_convention) {
758 LocationSummary* locations = field_access->GetLocations();
759
760 MoveConstant(locations->GetTemp(0), field_index);
761
762 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
763 || field_access->IsUnresolvedInstanceFieldSet();
764 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
765 || field_access->IsUnresolvedStaticFieldGet();
766
767 if (!is_get && DataType::IsFloatingPointType(field_type)) {
768 // Copy the float value to be set into the calling convention register.
769 // Note that using directly the temp location is problematic as we don't
770 // support temp register pairs. To avoid boilerplate conversion code, use
771 // the location from the calling convention.
772 MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
773 locations->InAt(is_instance ? 1 : 0),
774 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
775 : DataType::Type::kInt32));
776 }
777
778 QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
779 switch (field_type) {
780 case DataType::Type::kBool:
781 entrypoint = is_instance
782 ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
783 : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
784 break;
785 case DataType::Type::kInt8:
786 entrypoint = is_instance
787 ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
788 : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
789 break;
790 case DataType::Type::kInt16:
791 entrypoint = is_instance
792 ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
793 : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
794 break;
795 case DataType::Type::kUint16:
796 entrypoint = is_instance
797 ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
798 : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
799 break;
800 case DataType::Type::kInt32:
801 case DataType::Type::kFloat32:
802 entrypoint = is_instance
803 ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
804 : (is_get ? kQuickGet32Static : kQuickSet32Static);
805 break;
806 case DataType::Type::kReference:
807 entrypoint = is_instance
808 ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
809 : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
810 break;
811 case DataType::Type::kInt64:
812 case DataType::Type::kFloat64:
813 entrypoint = is_instance
814 ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
815 : (is_get ? kQuickGet64Static : kQuickSet64Static);
816 break;
817 default:
818 LOG(FATAL) << "Invalid type " << field_type;
819 }
820 InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
821
822 if (is_get && DataType::IsFloatingPointType(field_type)) {
823 MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
824 }
825 }
826
827 void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
828 Location runtime_type_index_location,
829 Location runtime_return_location) {
830 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
831 DCHECK_EQ(cls->InputCount(), 1u);
832 LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
833 cls, LocationSummary::kCallOnMainOnly);
834 locations->SetInAt(0, Location::NoLocation());
835 locations->AddTemp(runtime_type_index_location);
836 locations->SetOut(runtime_return_location);
837 }
838
839 void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
840 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
841 DCHECK(!cls->MustGenerateClinitCheck());
842 LocationSummary* locations = cls->GetLocations();
843 MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
844 if (cls->NeedsAccessCheck()) {
845 CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
846 InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
847 } else {
848 CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
849 InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
850 }
851 }
852
853 void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
854 HLoadMethodHandle* method_handle,
855 Location runtime_proto_index_location,
856 Location runtime_return_location) {
857 DCHECK_EQ(method_handle->InputCount(), 1u);
858 LocationSummary* locations =
859 new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
860 method_handle, LocationSummary::kCallOnMainOnly);
861 locations->SetInAt(0, Location::NoLocation());
862 locations->AddTemp(runtime_proto_index_location);
863 locations->SetOut(runtime_return_location);
864 }
865
866 void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
867 LocationSummary* locations = method_handle->GetLocations();
868 MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
869 CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
870 InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
871 }
872
873 void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
874 HLoadMethodType* method_type,
875 Location runtime_proto_index_location,
876 Location runtime_return_location) {
877 DCHECK_EQ(method_type->InputCount(), 1u);
878 LocationSummary* locations =
879 new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
880 method_type, LocationSummary::kCallOnMainOnly);
881 locations->SetInAt(0, Location::NoLocation());
882 locations->AddTemp(runtime_proto_index_location);
883 locations->SetOut(runtime_return_location);
884 }
885
886 void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
887 LocationSummary* locations = method_type->GetLocations();
888 MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
889 CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
890 InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
891 }
892
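// Return the offset of a boot image object or ArtMethod from the start of the first boot
// image space, checking in debug builds that it lies within the expected image section.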
893 static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
894 Runtime* runtime = Runtime::Current();
895 const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
896 runtime->GetHeap()->GetBootImageSpaces();
897 // Check that the `object` is in the expected section of one of the boot image files.
898 DCHECK(std::any_of(boot_image_spaces.begin(),
899 boot_image_spaces.end(),
900 [object, section](gc::space::ImageSpace* space) {
901 uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
902 uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
903 return space->GetImageHeader().GetImageSection(section).Contains(offset);
904 }));
905 uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
906 uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
907 return dchecked_integral_cast<uint32_t>(offset);
908 }
909
910 uint32_t CodeGenerator::GetBootImageOffset(ObjPtr<mirror::Object> object) {
911 return GetBootImageOffsetImpl(object.Ptr(), ImageHeader::kSectionObjects);
912 }
913
914 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
915 uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
916 DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
917 ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
918 DCHECK(klass != nullptr);
919 return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
920 }
921
922 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
923 uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
924 DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
925 ObjPtr<mirror::String> string = load_string->GetString().Get();
926 DCHECK(string != nullptr);
927 return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
928 }
929
930 uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
931 ArtMethod* method = invoke->GetResolvedMethod();
932 DCHECK(method != nullptr);
933 return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
934 }
935
936 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image objects are non-moveable.
937 uint32_t CodeGenerator::GetBootImageOffset(ClassRoot class_root) NO_THREAD_SAFETY_ANALYSIS {
938 ObjPtr<mirror::Class> klass = GetClassRoot<kWithoutReadBarrier>(class_root);
939 return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
940 }
941
942 // NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
943 uint32_t CodeGenerator::GetBootImageOffsetOfIntrinsicDeclaringClass(HInvoke* invoke)
944 NO_THREAD_SAFETY_ANALYSIS {
945 DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
946 ArtMethod* method = invoke->GetResolvedMethod();
947 DCHECK(method != nullptr);
948 ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass<kWithoutReadBarrier>();
949 return GetBootImageOffsetImpl(declaring_class.Ptr(), ImageHeader::kSectionObjects);
950 }
951
952 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
953 // The DCHECKs below check that a register is not specified twice in
954 // the summary. The out location can overlap with an input, so we need
955 // to special case it.
956 if (location.IsRegister()) {
957 DCHECK(is_out || !blocked_core_registers_[location.reg()]);
958 blocked_core_registers_[location.reg()] = true;
959 } else if (location.IsFpuRegister()) {
960 DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
961 blocked_fpu_registers_[location.reg()] = true;
962 } else if (location.IsFpuRegisterPair()) {
963 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
964 blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
965 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
966 blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
967 } else if (location.IsRegisterPair()) {
968 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
969 blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
970 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
971 blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
972 }
973 }
974
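// Run the location builder for one instruction (and its environments) and update the
// leaf-method and current-method bookkeeping based on the resulting location summary.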
975 void CodeGenerator::AllocateLocations(HInstruction* instruction) {
976 for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
977 env->AllocateLocations();
978 }
979 instruction->Accept(GetLocationBuilder());
980 DCHECK(CheckTypeConsistency(instruction));
981 LocationSummary* locations = instruction->GetLocations();
982 if (!instruction->IsSuspendCheckEntry()) {
983 if (locations != nullptr) {
984 if (locations->CanCall()) {
985 MarkNotLeaf();
986 } else if (locations->Intrinsified() &&
987 instruction->IsInvokeStaticOrDirect() &&
988 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
989 // A static method call that has been fully intrinsified, and cannot call on the slow
990 // path or refer to the current method directly, no longer needs the current method.
991 return;
992 }
993 }
994 if (instruction->NeedsCurrentMethod()) {
995 SetRequiresCurrentMethod();
996 }
997 }
998 }
999
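// Instantiate the architecture-specific code generator for the target instruction set,
// or return nullptr if that backend is not compiled in.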
1000 std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
1001 const CompilerOptions& compiler_options,
1002 OptimizingCompilerStats* stats) {
1003 ArenaAllocator* allocator = graph->GetAllocator();
1004 switch (compiler_options.GetInstructionSet()) {
1005 #ifdef ART_ENABLE_CODEGEN_arm
1006 case InstructionSet::kArm:
1007 case InstructionSet::kThumb2: {
1008 return std::unique_ptr<CodeGenerator>(
1009 new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
1010 }
1011 #endif
1012 #ifdef ART_ENABLE_CODEGEN_arm64
1013 case InstructionSet::kArm64: {
1014 return std::unique_ptr<CodeGenerator>(
1015 new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
1016 }
1017 #endif
1018 #ifdef ART_ENABLE_CODEGEN_x86
1019 case InstructionSet::kX86: {
1020 return std::unique_ptr<CodeGenerator>(
1021 new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
1022 }
1023 #endif
1024 #ifdef ART_ENABLE_CODEGEN_x86_64
1025 case InstructionSet::kX86_64: {
1026 return std::unique_ptr<CodeGenerator>(
1027 new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
1028 }
1029 #endif
1030 default:
1031 return nullptr;
1032 }
1033 }
1034
1035 CodeGenerator::CodeGenerator(HGraph* graph,
1036 size_t number_of_core_registers,
1037 size_t number_of_fpu_registers,
1038 size_t number_of_register_pairs,
1039 uint32_t core_callee_save_mask,
1040 uint32_t fpu_callee_save_mask,
1041 const CompilerOptions& compiler_options,
1042 OptimizingCompilerStats* stats)
1043 : frame_size_(0),
1044 core_spill_mask_(0),
1045 fpu_spill_mask_(0),
1046 first_register_slot_in_slow_path_(0),
1047 allocated_registers_(RegisterSet::Empty()),
1048 blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
1049 kArenaAllocCodeGenerator)),
1050 blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
1051 kArenaAllocCodeGenerator)),
1052 number_of_core_registers_(number_of_core_registers),
1053 number_of_fpu_registers_(number_of_fpu_registers),
1054 number_of_register_pairs_(number_of_register_pairs),
1055 core_callee_save_mask_(core_callee_save_mask),
1056 fpu_callee_save_mask_(fpu_callee_save_mask),
1057 block_order_(nullptr),
1058 disasm_info_(nullptr),
1059 stats_(stats),
1060 graph_(graph),
1061 compiler_options_(compiler_options),
1062 current_slow_path_(nullptr),
1063 current_block_index_(0),
1064 is_leaf_(true),
1065 requires_current_method_(false),
1066 code_generation_data_() {
1067 if (GetGraph()->IsCompilingOsr()) {
1068 // Make OSR methods have all registers spilled; this simplifies the logic of
1069 // jumping to the compiled code directly.
1070 for (size_t i = 0; i < number_of_core_registers_; ++i) {
1071 if (IsCoreCalleeSaveRegister(i)) {
1072 AddAllocatedRegister(Location::RegisterLocation(i));
1073 }
1074 }
1075 for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
1076 if (IsFloatingPointCalleeSaveRegister(i)) {
1077 AddAllocatedRegister(Location::FpuRegisterLocation(i));
1078 }
1079 }
1080 }
1081 }
1082
1083 CodeGenerator::~CodeGenerator() {}
1084
1085 size_t CodeGenerator::GetNumberOfJitRoots() const {
1086 DCHECK(code_generation_data_ != nullptr);
1087 return code_generation_data_->GetNumberOfJitRoots();
1088 }
1089
1090 static void CheckCovers(uint32_t dex_pc,
1091 const HGraph& graph,
1092 const CodeInfo& code_info,
1093 const ArenaVector<HSuspendCheck*>& loop_headers,
1094 ArenaVector<size_t>* covered) {
1095 for (size_t i = 0; i < loop_headers.size(); ++i) {
1096 if (loop_headers[i]->GetDexPc() == dex_pc) {
1097 if (graph.IsCompilingOsr()) {
1098 DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
1099 }
1100 ++(*covered)[i];
1101 }
1102 }
1103 }
1104
1105 // Debug helper to ensure loop entries in compiled code are matched by
1106 // dex branch instructions.
1107 static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
1108 const CodeInfo& code_info,
1109 const dex::CodeItem& code_item) {
1110 if (graph.HasTryCatch()) {
1111 // One can write loops through try/catch, which we do not support for OSR anyway.
1112 return;
1113 }
1114 ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
1115 for (HBasicBlock* block : graph.GetReversePostOrder()) {
1116 if (block->IsLoopHeader()) {
1117 HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
1118 if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
1119 loop_headers.push_back(suspend_check);
1120 }
1121 }
1122 }
1123 ArenaVector<size_t> covered(
1124 loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
1125 for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
1126 &code_item)) {
1127 const uint32_t dex_pc = pair.DexPc();
1128 const Instruction& instruction = pair.Inst();
1129 if (instruction.IsBranch()) {
1130 uint32_t target = dex_pc + instruction.GetTargetOffset();
1131 CheckCovers(target, graph, code_info, loop_headers, &covered);
1132 } else if (instruction.IsSwitch()) {
1133 DexSwitchTable table(instruction, dex_pc);
1134 uint16_t num_entries = table.GetNumEntries();
1135 size_t offset = table.GetFirstValueIndex();
1136
1137 // Use a larger loop counter type to avoid overflow issues.
1138 for (size_t i = 0; i < num_entries; ++i) {
1139 // The target of the case.
1140 uint32_t target = dex_pc + table.GetEntryAt(i + offset);
1141 CheckCovers(target, graph, code_info, loop_headers, &covered);
1142 }
1143 }
1144 }
1145
1146 for (size_t i = 0; i < covered.size(); ++i) {
1147 DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
1148 }
1149 }
1150
1151 ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
1152 ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
1153 if (kIsDebugBuild && code_item != nullptr) {
1154 CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
1155 }
1156 return stack_map;
1157 }
1158
1159 // Returns whether stackmap dex register info is needed for the instruction.
1160 //
1161 // The following cases mandate having a dex register map:
1162 // * Deoptimization
1163 // when we need to obtain the values to restore actual vregisters for the interpreter.
1164 // * Debuggability
1165 // when we want to observe the values / asynchronously deoptimize.
1166 // * Monitor operations
1167 // to allow dumping locked dex registers in a stack trace for non-debuggable code.
1168 // * On-stack-replacement (OSR)
1169 // when entering OSR-compiled code from the interpreter, we need to initialize the compiled
1170 // code values with the values from the vregisters.
1171 // * Method local catch blocks
1172 // a catch block must see the environment of the instruction from the same method that can
1173 // throw to this block.
1174 static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
1175 HGraph* graph = instruction->GetBlock()->GetGraph();
1176 return instruction->IsDeoptimize() ||
1177 graph->IsDebuggable() ||
1178 graph->HasMonitorOperations() ||
1179 osr ||
1180 instruction->CanThrowIntoCatchBlock();
1181 }
1182
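// Convenience overload that records the stack map at the assembler's current code position.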
1183 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
1184 uint32_t dex_pc,
1185 SlowPathCode* slow_path,
1186 bool native_debug_info) {
1187 RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
1188 }
1189
1190 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
1191 uint32_t dex_pc,
1192 uint32_t native_pc,
1193 SlowPathCode* slow_path,
1194 bool native_debug_info) {
1195 if (instruction != nullptr) {
1196 // The code generated for some type conversions
1197 // may call the runtime, thus normally requiring a subsequent
1198 // call to this method. However, the method verifier does not
1199 // produce PC information for certain instructions, which are
1200 // considered "atomic" (they cannot join a GC).
1201 // Therefore we do not currently record PC information for such
1202 // instructions. As this may change later, we added this special
1203 // case so that code generators may nevertheless call
1204 // CodeGenerator::RecordPcInfo without triggering an error in
1205 // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
1206 // thereafter.
1207 if (instruction->IsTypeConversion()) {
1208 return;
1209 }
1210 if (instruction->IsRem()) {
1211 DataType::Type type = instruction->AsRem()->GetResultType();
1212 if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
1213 return;
1214 }
1215 }
1216 }
1217
1218 StackMapStream* stack_map_stream = GetStackMapStream();
1219 if (instruction == nullptr) {
1220 // For stack overflow checks and native-debug-info entries without dex register
1221 // mapping (i.e. start of basic block or start of slow path).
1222 stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
1223 stack_map_stream->EndStackMapEntry();
1224 return;
1225 }
1226
1227 LocationSummary* locations = instruction->GetLocations();
1228 uint32_t register_mask = locations->GetRegisterMask();
1229 DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
1230 if (locations->OnlyCallsOnSlowPath()) {
1231 // In the case of a slow path, we currently set the location of caller-save registers
1232 // to register (instead of their stack location when pushed before the slow-path
1233 // call). Therefore register_mask contains both callee-save and caller-save
1234 // registers that hold objects. We must remove the spilled caller-saves from the
1235 // mask, since they will be overwritten by the callee.
1236 uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
1237 register_mask &= ~spills;
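// Worked example (illustrative): if the slow path spills caller-saves r1 and r2,
// spills == 0b0110; a register_mask of 0b0111 (r0-r2 holding references) is then
// reduced to 0b0001, keeping only r0, which really does stay in a register.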
1238 } else {
1239 // The register mask must be a subset of callee-save registers.
1240 DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
1241 }
1242
1243 uint32_t outer_dex_pc = dex_pc;
1244 uint32_t outer_environment_size = 0u;
1245 uint32_t inlining_depth = 0;
1246 HEnvironment* const environment = instruction->GetEnvironment();
1247 if (environment != nullptr) {
1248 HEnvironment* outer_environment = environment;
1249 while (outer_environment->GetParent() != nullptr) {
1250 outer_environment = outer_environment->GetParent();
1251 ++inlining_depth;
1252 }
1253 outer_dex_pc = outer_environment->GetDexPc();
1254 outer_environment_size = outer_environment->Size();
1255 }
1256
1257 HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
1258 bool osr =
1259 instruction->IsSuspendCheck() &&
1260 (info != nullptr) &&
1261 graph_->IsCompilingOsr() &&
1262 (inlining_depth == 0);
1263 StackMap::Kind kind = native_debug_info
1264 ? StackMap::Kind::Debug
1265 : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
1266 bool needs_vreg_info = NeedsVregInfo(instruction, osr);
1267 stack_map_stream->BeginStackMapEntry(outer_dex_pc,
1268 native_pc,
1269 register_mask,
1270 locations->GetStackMask(),
1271 kind,
1272 needs_vreg_info);
1273
1274 EmitEnvironment(environment, slow_path, needs_vreg_info);
1275 stack_map_stream->EndStackMapEntry();
1276
1277 if (osr) {
1278 DCHECK_EQ(info->GetSuspendCheck(), instruction);
1279 DCHECK(info->IsIrreducible());
1280 DCHECK(environment != nullptr);
1281 if (kIsDebugBuild) {
1282 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
1283 HInstruction* in_environment = environment->GetInstructionAt(i);
1284 if (in_environment != nullptr) {
1285 DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
1286 Location location = environment->GetLocationAt(i);
1287 DCHECK(location.IsStackSlot() ||
1288 location.IsDoubleStackSlot() ||
1289 location.IsConstant() ||
1290 location.IsInvalid());
1291 if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
1292 DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
1293 }
1294 }
1295 }
1296 }
1297 }
1298 }
1299
1300 bool CodeGenerator::HasStackMapAtCurrentPc() {
1301 uint32_t pc = GetAssembler()->CodeSize();
1302 StackMapStream* stack_map_stream = GetStackMapStream();
1303 size_t count = stack_map_stream->GetNumberOfStackMaps();
1304 if (count == 0) {
1305 return false;
1306 }
1307 return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
1308 }
1309
1310 void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
1311 uint32_t dex_pc,
1312 SlowPathCode* slow_path) {
1313 if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
1314 if (HasStackMapAtCurrentPc()) {
1315 // Ensure that we do not collide with the stack map of the previous instruction.
1316 GenerateNop();
1317 }
1318 RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
1319 }
1320 }
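
// Illustrative sketch (not part of this file): the helper above is called
// before emitting code for an instruction, so a native debugger can map each
// native pc back to a dex pc. The surrounding emission loop is an assumption.
//
//   for (HInstruction* insn : instructions) {
//     MaybeRecordNativeDebugInfo(insn, insn->GetDexPc());
//     // ... emit native code for `insn` ...
//   }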
1321
1322 void CodeGenerator::RecordCatchBlockInfo() {
1323 StackMapStream* stack_map_stream = GetStackMapStream();
1324
1325 for (HBasicBlock* block : *block_order_) {
1326 if (!block->IsCatchBlock()) {
1327 continue;
1328 }
1329
1330 uint32_t dex_pc = block->GetDexPc();
1331 uint32_t num_vregs = graph_->GetNumberOfVRegs();
1332 uint32_t native_pc = GetAddressOf(block);
1333
1334 stack_map_stream->BeginStackMapEntry(dex_pc,
1335 native_pc,
1336 /* register_mask= */ 0,
1337 /* sp_mask= */ nullptr,
1338 StackMap::Kind::Catch);
1339
1340 HInstruction* current_phi = block->GetFirstPhi();
1341 for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
1342 while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
1343 HInstruction* next_phi = current_phi->GetNext();
1344 DCHECK(next_phi == nullptr ||
1345 current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
1346 << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
1347 current_phi = next_phi;
1348 }
1349
1350 if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
1351 stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1352 } else {
1353 Location location = current_phi->GetLocations()->Out();
1354 switch (location.GetKind()) {
1355 case Location::kStackSlot: {
1356 stack_map_stream->AddDexRegisterEntry(
1357 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1358 break;
1359 }
1360 case Location::kDoubleStackSlot: {
1361 stack_map_stream->AddDexRegisterEntry(
1362 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1363 stack_map_stream->AddDexRegisterEntry(
1364 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1365 ++vreg;
1366 DCHECK_LT(vreg, num_vregs);
1367 break;
1368 }
1369 default: {
1370 // All catch phis must be allocated to a stack slot.
1371 LOG(FATAL) << "Unexpected kind " << location.GetKind();
1372 UNREACHABLE();
1373 }
1374 }
1375 }
1376 }
1377
1378 stack_map_stream->EndStackMapEntry();
1379 }
1380 }
1381
1382 void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
1383 DCHECK(code_generation_data_ != nullptr);
1384 code_generation_data_->AddSlowPath(slow_path);
1385 }
1386
1387 void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) {
1388 StackMapStream* stack_map_stream = GetStackMapStream();
1389 // Walk over the environment, and record the location of dex registers.
1390 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
1391 HInstruction* current = environment->GetInstructionAt(i);
1392 if (current == nullptr) {
1393 stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1394 continue;
1395 }
1396
1397 using Kind = DexRegisterLocation::Kind;
1398 Location location = environment->GetLocationAt(i);
1399 switch (location.GetKind()) {
1400 case Location::kConstant: {
1401 DCHECK_EQ(current, location.GetConstant());
1402 if (current->IsLongConstant()) {
1403 int64_t value = current->AsLongConstant()->GetValue();
1404 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
1405 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
1406 ++i;
1407 DCHECK_LT(i, environment_size);
1408 } else if (current->IsDoubleConstant()) {
1409 int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
1410 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
1411 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
1412 ++i;
1413 DCHECK_LT(i, environment_size);
1414 } else if (current->IsIntConstant()) {
1415 int32_t value = current->AsIntConstant()->GetValue();
1416 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
1417 } else if (current->IsNullConstant()) {
1418 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
1419 } else {
1420 DCHECK(current->IsFloatConstant()) << current->DebugName();
1421 int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
1422 stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
1423 }
1424 break;
1425 }
1426
1427 case Location::kStackSlot: {
1428 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
1429 break;
1430 }
1431
1432 case Location::kDoubleStackSlot: {
1433 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
1434 stack_map_stream->AddDexRegisterEntry(
1435 Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1436 ++i;
1437 DCHECK_LT(i, environment_size);
1438 break;
1439 }
1440
1441 case Location::kRegister : {
1442 int id = location.reg();
1443 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
1444 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
1445 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1446 if (current->GetType() == DataType::Type::kInt64) {
1447 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
1448 ++i;
1449 DCHECK_LT(i, environment_size);
1450 }
1451 } else {
1452 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
1453 if (current->GetType() == DataType::Type::kInt64) {
1454 stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
1455 ++i;
1456 DCHECK_LT(i, environment_size);
1457 }
1458 }
1459 break;
1460 }
1461
1462 case Location::kFpuRegister : {
1463 int id = location.reg();
1464 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
1465 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
1466 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1467 if (current->GetType() == DataType::Type::kFloat64) {
1468 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
1469 ++i;
1470 DCHECK_LT(i, environment_size);
1471 }
1472 } else {
1473 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
1474 if (current->GetType() == DataType::Type::kFloat64) {
1475 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
1476 ++i;
1477 DCHECK_LT(i, environment_size);
1478 }
1479 }
1480 break;
1481 }
1482
1483 case Location::kFpuRegisterPair : {
1484 int low = location.low();
1485 int high = location.high();
1486 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
1487 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
1488 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1489 } else {
1490 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
1491 }
1492 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
1493 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
1494 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1495 ++i;
1496 } else {
1497 stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
1498 ++i;
1499 }
1500 DCHECK_LT(i, environment_size);
1501 break;
1502 }
1503
1504 case Location::kRegisterPair : {
1505 int low = location.low();
1506 int high = location.high();
1507 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
1508 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
1509 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1510 } else {
1511 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
1512 }
1513 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
1514 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
1515 stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
1516 } else {
1517 stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
1518 }
1519 ++i;
1520 DCHECK_LT(i, environment_size);
1521 break;
1522 }
1523
1524 case Location::kInvalid: {
1525 stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
1526 break;
1527 }
1528
1529 default:
1530 LOG(FATAL) << "Unexpected kind " << location.GetKind();
1531 }
1532 }
1533 }
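
// Worked example (illustrative): a wide constant occupies two vregs and is
// emitted as two kConstant entries, low half first. For a long constant
// 0x1122334455667788 held in the environment:
//
//   stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0x55667788);  // Low32Bits(value)
//   stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0x11223344);  // High32Bits(value)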
1534
1535 void CodeGenerator::EmitEnvironment(HEnvironment* environment,
1536 SlowPathCode* slow_path,
1537 bool needs_vreg_info) {
1538 if (environment == nullptr) return;
1539
1540 StackMapStream* stack_map_stream = GetStackMapStream();
1541 bool emit_inline_info = environment->GetParent() != nullptr;
1542
1543 if (emit_inline_info) {
1544 // We emit the parent environment first.
1545 EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info);
1546 stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
1547 environment->GetDexPc(),
1548 needs_vreg_info ? environment->Size() : 0,
1549 &graph_->GetDexFile());
1550 }
1551
1552 if (needs_vreg_info) {
1553 // If a dex register map is not required we just won't emit it.
1554 EmitVRegInfo(environment, slow_path);
1555 }
1556
1557 if (emit_inline_info) {
1558 stack_map_stream->EndInlineInfoEntry();
1559 }
1560 }
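
// Illustrative sketch (not part of this file): for a method outer() with an
// inlined call to inner(), the environment chain is emitted outermost first,
// so the stream sees roughly:
//
//   EmitVRegInfo(outer_environment, slow_path);
//   stack_map_stream->BeginInlineInfoEntry(inner_method, inner_dex_pc, inner_size, &dex_file);
//   EmitVRegInfo(inner_environment, slow_path);
//   stack_map_stream->EndInlineInfoEntry();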
1561
1562 bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
1563 return null_check->IsEmittedAtUseSite();
1564 }
1565
1566 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
1567 HNullCheck* null_check = instr->GetImplicitNullCheck();
1568 if (null_check != nullptr) {
1569 RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
1570 }
1571 }
1572
1573 LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
1574 RegisterSet caller_saves) {
1575 // Note: Using kNoCall allows the method to be treated as a leaf (and to eliminate the
1576 // HSuspendCheck from the entry block). However, it will still get a valid stack frame
1577 // because the HNullCheck needs an environment.
1578 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
1579 // When throwing from a try block, we may need to retrieve dalvik registers from
1580 // physical registers and we also need to set up stack mask for GC. This is
1581 // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
1582 bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
1583 if (can_throw_into_catch_block) {
1584 call_kind = LocationSummary::kCallOnSlowPath;
1585 }
1586 LocationSummary* locations =
1587 new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
1588 if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
1589 locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
1590 }
1591 DCHECK(!instruction->HasUses());
1592 return locations;
1593 }
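
// Illustrative sketch (not part of this file): a typical caller in a backend's
// locations builder, e.g. for a bounds check whose slow path passes two
// arguments to the runtime. The calling_convention object is hypothetical here.
//
//   void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
//     RegisterSet caller_saves = RegisterSet::Empty();
//     caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
//     caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
//     LocationSummary* locations =
//         codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
//     locations->SetInAt(0, Location::RequiresRegister());
//     locations->SetInAt(1, Location::RequiresRegister());
//   }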
1594
1595 void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
1596 if (compiler_options_.GetImplicitNullChecks()) {
1597 MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
1598 GenerateImplicitNullCheck(instruction);
1599 } else {
1600 MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
1601 GenerateExplicitNullCheck(instruction);
1602 }
1603 }
1604
1605 void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
1606 HParallelMove* spills) const {
1607 LocationSummary* locations = suspend_check->GetLocations();
1608 HBasicBlock* block = suspend_check->GetBlock();
1609 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
1610 DCHECK(block->IsLoopHeader());
1611 DCHECK(block->GetFirstInstruction() == spills);
1612
1613 for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
1614 Location dest = spills->MoveOperandsAt(i)->GetDestination();
1615 // All parallel moves in loop headers are spills.
1616 DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
1617 // Clear the stack bit marking a reference. Do not bother to check whether the spill is
1618 // actually a reference spill; clearing bits that are already zero is harmless.
1619 locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
1620 }
1621 }
1622
1623 void CodeGenerator::EmitParallelMoves(Location from1,
1624 Location to1,
1625 DataType::Type type1,
1626 Location from2,
1627 Location to2,
1628 DataType::Type type2) {
1629 HParallelMove parallel_move(GetGraph()->GetAllocator());
1630 parallel_move.AddMove(from1, to1, type1, nullptr);
1631 parallel_move.AddMove(from2, to2, type2, nullptr);
1632 GetMoveResolver()->EmitNativeCode(&parallel_move);
1633 }
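
// Illustrative sketch (not part of this file): a slow path commonly uses the
// helper above to move two values into the runtime calling convention in one
// conflict-free step (the resolver handles overlapping sources/destinations).
// The location members and calling_convention object are hypothetical.
//
//   codegen->EmitParallelMoves(
//       index_location_, Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
//       DataType::Type::kInt32,
//       length_location_, Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
//       DataType::Type::kInt32);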
1634
1635 void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
1636 HInstruction* instruction,
1637 SlowPathCode* slow_path) {
1638 // Ensure that the call kind indication given to the register allocator is
1639 // coherent with the runtime call generated.
1640 if (slow_path == nullptr) {
1641 DCHECK(instruction->GetLocations()->WillCall())
1642 << "instruction->DebugName()=" << instruction->DebugName();
1643 } else {
1644 DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
1645 << "instruction->DebugName()=" << instruction->DebugName()
1646 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1647 }
1648
1649 // Check that the GC side effect is set when required.
1650 // TODO: Reverse EntrypointCanTriggerGC
1651 if (EntrypointCanTriggerGC(entrypoint)) {
1652 if (slow_path == nullptr) {
1653 DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
1654 << "instruction->DebugName()=" << instruction->DebugName()
1655 << " instruction->GetSideEffects().ToString()="
1656 << instruction->GetSideEffects().ToString();
1657 } else {
1658 // 'CanTriggerGC' side effect is used to restrict optimization of instructions which depend
1659 // on GC (e.g. IntermediateAddress) - to ensure they are not alive across GC points. However
1660 // if execution never returns to the compiled code from a GC point this restriction is
1661 // unnecessary - in particular for fatal slow paths which might trigger GC.
1662 DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
1663 instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
1664 // When (non-Baker) read barriers are enabled, some instructions
1665 // use a slow path to emit a read barrier, which does not trigger
1666 // GC.
1667 (kEmitCompilerReadBarrier &&
1668 !kUseBakerReadBarrier &&
1669 (instruction->IsInstanceFieldGet() ||
1670 instruction->IsPredicatedInstanceFieldGet() ||
1671 instruction->IsStaticFieldGet() ||
1672 instruction->IsArrayGet() ||
1673 instruction->IsLoadClass() ||
1674 instruction->IsLoadString() ||
1675 instruction->IsInstanceOf() ||
1676 instruction->IsCheckCast() ||
1677 (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
1678 << "instruction->DebugName()=" << instruction->DebugName()
1679 << " instruction->GetSideEffects().ToString()="
1680 << instruction->GetSideEffects().ToString()
1681 << " slow_path->GetDescription()=" << slow_path->GetDescription() << std::endl
1682 << "Instruction and args: " << instruction->DumpWithArgs();
1683 }
1684 } else {
1685 // The GC side effect is not required for the instruction. But the instruction might still have
1686 // it, for example if it calls other entrypoints requiring it.
1687 }
1688
1689 // Check the coherency of leaf information.
1690 DCHECK(instruction->IsSuspendCheck()
1691 || ((slow_path != nullptr) && slow_path->IsFatal())
1692 || instruction->GetLocations()->CanCall()
1693 || !IsLeafMethod())
1694 << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
1695 }
1696
1697 void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
1698 SlowPathCode* slow_path) {
1699 DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
1700 << "instruction->DebugName()=" << instruction->DebugName()
1701 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1702 // Only the Baker read barrier marking slow path used by certain
1703 // instructions is expected to invoke the runtime without recording
1704 // PC-related information.
1705 DCHECK(kUseBakerReadBarrier);
1706 DCHECK(instruction->IsInstanceFieldGet() ||
1707 instruction->IsPredicatedInstanceFieldGet() ||
1708 instruction->IsStaticFieldGet() ||
1709 instruction->IsArrayGet() ||
1710 instruction->IsArraySet() ||
1711 instruction->IsLoadClass() ||
1712 instruction->IsLoadString() ||
1713 instruction->IsInstanceOf() ||
1714 instruction->IsCheckCast() ||
1715 (instruction->IsInvoke() && instruction->GetLocations()->Intrinsified()))
1716 << "instruction->DebugName()=" << instruction->DebugName()
1717 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1718 }
1719
1720 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1721 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1722
1723 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
1724 for (uint32_t i : LowToHighBits(core_spills)) {
1725 // If the register holds an object, update the stack mask.
1726 if (locations->RegisterContainsObject(i)) {
1727 locations->SetStackBit(stack_offset / kVRegSize);
1728 }
1729 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1730 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1731 saved_core_stack_offsets_[i] = stack_offset;
1732 stack_offset += codegen->SaveCoreRegister(stack_offset, i);
1733 }
1734
1735 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
1736 for (uint32_t i : LowToHighBits(fp_spills)) {
1737 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1738 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1739 saved_fpu_stack_offsets_[i] = stack_offset;
1740 stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
1741 }
1742 }
1743
1744 void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1745 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1746
1747 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
1748 for (uint32_t i : LowToHighBits(core_spills)) {
1749 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1750 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1751 stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
1752 }
1753
1754 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
1755 for (uint32_t i : LowToHighBits(fp_spills)) {
1756 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1757 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1758 stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
1759 }
1760 }
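
// Illustrative sketch (not part of this file): a non-fatal slow path brackets
// its runtime call with the two helpers above so that live caller-save
// registers survive the call. The entrypoint name, assembler mnemonics and the
// backend InvokeRuntime overload are assumptions for the example.
//
//   void EmitNativeCode(CodeGenerator* codegen) override {
//     __ Bind(GetEntryLabel());
//     SaveLiveRegisters(codegen, instruction_->GetLocations());
//     codegen->InvokeRuntime(kQuickSomeEntrypoint, instruction_, instruction_->GetDexPc(), this);
//     RestoreLiveRegisters(codegen, instruction_->GetLocations());
//     __ Jmp(GetExitLabel());
//   }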
1761
1762 void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
1763 // Check to see if we have known failures that will cause us to bail out
1764 // to the runtime, and just generate the runtime call directly.
1765 HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
1766 HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
1767
1768 // The positions must be non-negative.
1769 if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
1770 (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
1771 // We will have to fail anyway.
1772 return;
1773 }
1774
1775 // The length must be >= 0.
1776 HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
1777 if (length != nullptr) {
1778 int32_t len = length->GetValue();
1779 if (len < 0) {
1780 // Just call as normal.
1781 return;
1782 }
1783 }
1784
1785 SystemArrayCopyOptimizations optimizations(invoke);
1786
1787 if (optimizations.GetDestinationIsSource()) {
1788 if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
1789 // We only support backward copying if source and destination are the same.
1790 return;
1791 }
1792 }
1793
1794 if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
1795 // We currently don't intrinsify primitive copying.
1796 return;
1797 }
1798
1799 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
1800 LocationSummary* locations = new (allocator) LocationSummary(invoke,
1801 LocationSummary::kCallOnSlowPath,
1802 kIntrinsified);
1803 // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
1804 locations->SetInAt(0, Location::RequiresRegister());
1805 locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
1806 locations->SetInAt(2, Location::RequiresRegister());
1807 locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
1808 locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
1809
1810 locations->AddTemp(Location::RequiresRegister());
1811 locations->AddTemp(Location::RequiresRegister());
1812 locations->AddTemp(Location::RequiresRegister());
1813 }
1814
1815 void CodeGenerator::EmitJitRoots(uint8_t* code,
1816 const uint8_t* roots_data,
1817 /*out*/std::vector<Handle<mirror::Object>>* roots) {
1818 code_generation_data_->EmitJitRoots(roots);
1819 EmitJitRootPatches(code, roots_data);
1820 }
1821
1822 QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
1823 switch (new_array->GetComponentSizeShift()) {
1824 case 0: return kQuickAllocArrayResolved8;
1825 case 1: return kQuickAllocArrayResolved16;
1826 case 2: return kQuickAllocArrayResolved32;
1827 case 3: return kQuickAllocArrayResolved64;
1828 }
1829 LOG(FATAL) << "Unreachable";
1830 UNREACHABLE();
1831 }
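
// Worked example (illustrative): for `new int[n]` the component size is 4 bytes,
// so GetComponentSizeShift() returns 2 and the selected entrypoint is
// kQuickAllocArrayResolved32; `new long[n]` (shift 3) maps to
// kQuickAllocArrayResolved64.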
1832
1833 } // namespace art
1834