/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator.h"
#include "base/globals.h"

#ifdef ART_ENABLE_CODEGEN_arm
#include "code_generator_arm_vixl.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm64
#include "code_generator_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_riscv64
#include "code_generator_riscv64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86
#include "code_generator_x86.h"
#endif

#ifdef ART_ENABLE_CODEGEN_x86_64
#include "code_generator_x86_64.h"
#endif

#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "base/casts.h"
#include "base/leb128.h"
#include "class_linker.h"
#include "class_root-inl.h"
#include "code_generation_data.h"
#include "dex/bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
#include "graph_visualizer.h"
#include "gc/space/image_space.h"
#include "intern_table.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_reference.h"
#include "mirror/reference.h"
#include "mirror/string.h"
#include "parallel_move_resolver.h"
#include "scoped_thread_state_change-inl.h"
#include "ssa_liveness_analysis.h"
#include "oat/image.h"
#include "oat/stack_map.h"
#include "stack_map_stream.h"
#include "string_builder_append.h"
#include "thread-current-inl.h"
#include "utils/assembler.h"

namespace art HIDDEN {

// Return whether a location is consistent with a type.
static bool CheckType(DataType::Type type, Location location) {
  if (location.IsFpuRegister()
      || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
    return (type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64);
  } else if (location.IsRegister() ||
             (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
    return DataType::IsIntegralType(type) || (type == DataType::Type::kReference);
  } else if (location.IsRegisterPair()) {
    return type == DataType::Type::kInt64;
  } else if (location.IsFpuRegisterPair()) {
    return type == DataType::Type::kFloat64;
  } else if (location.IsStackSlot()) {
    return (DataType::IsIntegralType(type) && type != DataType::Type::kInt64)
           || (type == DataType::Type::kFloat32)
           || (type == DataType::Type::kReference);
  } else if (location.IsDoubleStackSlot()) {
    return (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
  } else if (location.IsConstant()) {
    if (location.GetConstant()->IsIntConstant()) {
      return DataType::IsIntegralType(type) && (type != DataType::Type::kInt64);
    } else if (location.GetConstant()->IsNullConstant()) {
      return type == DataType::Type::kReference;
    } else if (location.GetConstant()->IsLongConstant()) {
      return type == DataType::Type::kInt64;
    } else if (location.GetConstant()->IsFloatConstant()) {
      return type == DataType::Type::kFloat32;
    } else {
      return location.GetConstant()->IsDoubleConstant()
             && (type == DataType::Type::kFloat64);
    }
  } else {
    return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
  }
}

// Check that a location summary is consistent with an instruction.
static bool CheckTypeConsistency(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return true;
  }

  if (locations->Out().IsUnallocated()
      && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
    DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
        << instruction->GetType()
        << " " << locations->InAt(0);
  } else {
    DCHECK(CheckType(instruction->GetType(), locations->Out()))
        << instruction->GetType()
        << " " << locations->Out();
  }

  HConstInputsRef inputs = instruction->GetInputs();
  for (size_t i = 0; i < inputs.size(); ++i) {
    DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
        << inputs[i]->GetType() << " " << locations->InAt(i);
  }

  HEnvironment* environment = instruction->GetEnvironment();
  for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
    if (environment->GetInstructionAt(i) != nullptr) {
      DataType::Type type = environment->GetInstructionAt(i)->GetType();
      DCHECK(CheckType(type, environment->GetLocationAt(i)))
          << type << " " << environment->GetLocationAt(i);
    } else {
      DCHECK(environment->GetLocationAt(i).IsInvalid())
          << environment->GetLocationAt(i);
    }
  }
  return true;
}

bool CodeGenerator::EmitReadBarrier() const {
  return GetCompilerOptions().EmitReadBarrier();
}

bool CodeGenerator::EmitBakerReadBarrier() const {
  return kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
}

bool CodeGenerator::EmitNonBakerReadBarrier() const {
  return !kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
}

ReadBarrierOption CodeGenerator::GetCompilerReadBarrierOption() const {
  return EmitReadBarrier() ? kWithReadBarrier : kWithoutReadBarrier;
}

bool CodeGenerator::ShouldCheckGCCard(DataType::Type type,
                                      HInstruction* value,
                                      WriteBarrierKind write_barrier_kind) const {
  const CompilerOptions& options = GetCompilerOptions();
  const bool result =
      // Check the GC card in debug mode,
      options.EmitRunTimeChecksInDebugMode() &&
      // only for CC GC,
      options.EmitReadBarrier() &&
      // and if we eliminated the write barrier in WBE.
      !StoreNeedsWriteBarrier(type, value, write_barrier_kind) &&
      CodeGenerator::StoreNeedsWriteBarrier(type, value);

  DCHECK_IMPLIES(result, write_barrier_kind == WriteBarrierKind::kDontEmit);
  DCHECK_IMPLIES(
      result, !(GetGraph()->IsCompilingBaseline() && compiler_options_.ProfileBranches()));

  return result;
}

ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetScopedAllocator();
}

StackMapStream* CodeGenerator::GetStackMapStream() {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetStackMapStream();
}

void CodeGenerator::ReserveJitStringRoot(StringReference string_reference,
                                         Handle<mirror::String> string) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitStringRoot(string_reference, string);
}

uint64_t CodeGenerator::GetJitStringRootIndex(StringReference string_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitStringRootIndex(string_reference);
}

void CodeGenerator::ReserveJitClassRoot(TypeReference type_reference, Handle<mirror::Class> klass) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->ReserveJitClassRoot(type_reference, klass);
}

uint64_t CodeGenerator::GetJitClassRootIndex(TypeReference type_reference) {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetJitClassRootIndex(type_reference);
}

void CodeGenerator::EmitJitRootPatches([[maybe_unused]] uint8_t* code,
                                       [[maybe_unused]] const uint8_t* roots_data) {
  DCHECK(code_generation_data_ != nullptr);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitStringRoots(), 0u);
  DCHECK_EQ(code_generation_data_->GetNumberOfJitClassRoots(), 0u);
}

uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
  return array_length->IsStringLength()
      ? mirror::String::CountOffset().Uint32Value()
      : mirror::Array::LengthOffset().Uint32Value();
}

uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
  DCHECK(array_get->GetType() == DataType::Type::kUint16 || !array_get->IsStringCharAt());
  return array_get->IsStringCharAt()
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(array_get->GetType())).Uint32Value();
}

bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
  DCHECK_EQ((*block_order_)[current_block_index_], current);
  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}

HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
  for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
    HBasicBlock* block = (*block_order_)[i];
    if (!block->IsSingleJump()) {
      return block;
    }
  }
  return nullptr;
}

HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
  while (block->IsSingleJump()) {
    block = block->GetSuccessors()[0];
  }
  return block;
}

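// RAII helper that records the interval of native code generated for a single
// HInstruction, so that the graph visualizer can interleave disassembly with
// the HIR. It does nothing when no disassembly information was requested.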
class DisassemblyScope {
 public:
  DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
      : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      start_offset_ = codegen_.GetAssembler().CodeSize();
    }
  }

  ~DisassemblyScope() {
    // We avoid building this data when we know it will not be used.
    if (codegen_.GetDisassemblyInformation() != nullptr) {
      codegen_.GetDisassemblyInformation()->AddInstructionInterval(
          instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
    }
  }

 private:
  const CodeGenerator& codegen_;
  HInstruction* instruction_;
  size_t start_offset_;
};

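// Emit the out-of-line code for all slow paths recorded while generating the
// main instruction stream.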
void CodeGenerator::GenerateSlowPaths() {
  DCHECK(code_generation_data_ != nullptr);
  size_t code_start = 0;
  for (const std::unique_ptr<SlowPathCode>& slow_path_ptr : code_generation_data_->GetSlowPaths()) {
    SlowPathCode* slow_path = slow_path_ptr.get();
    current_slow_path_ = slow_path;
    if (disasm_info_ != nullptr) {
      code_start = GetAssembler()->CodeSize();
    }
    // Record the dex pc at the start of the slow path (required for Java line number mapping).
    MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
    slow_path->EmitNativeCode(this);
    if (disasm_info_ != nullptr) {
      disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
    }
  }
  current_slow_path_ = nullptr;
}

void CodeGenerator::InitializeCodeGenerationData() {
  DCHECK(code_generation_data_ == nullptr);
  code_generation_data_ = CodeGenerationData::Create(graph_->GetArenaStack(), GetInstructionSet());
}

void CodeGenerator::Compile() {
  InitializeCodeGenerationData();

  // The register allocator already called `InitializeCodeGeneration`,
  // where the frame size has been computed.
  DCHECK(block_order_ != nullptr);
  Initialize();

  HGraphVisitor* instruction_visitor = GetInstructionVisitor();
  DCHECK_EQ(current_block_index_, 0u);

  GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                   core_spill_mask_,
                                   fpu_spill_mask_,
                                   GetGraph()->GetNumberOfVRegs(),
                                   GetGraph()->IsCompilingBaseline(),
                                   GetGraph()->IsDebuggable(),
                                   GetGraph()->HasShouldDeoptimizeFlag());

  size_t frame_start = GetAssembler()->CodeSize();
  GenerateFrameEntry();
  DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
  if (disasm_info_ != nullptr) {
    disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
  }

  for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
    HBasicBlock* block = (*block_order_)[current_block_index_];
    // Don't generate code for an empty block. Its predecessors will branch to its successor
    // directly. Also, the label of that block will not be emitted, so this helps catch
    // errors where we reference that label.
    if (block->IsSingleJump()) continue;
    Bind(block);
    // This ensures that we have correct native line mapping for all native instructions.
    // It is necessary to make stepping over a statement work. Otherwise, any initial
    // instructions (e.g. moves) would be assumed to be the start of the next statement.
    MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* current = it.Current();
      if (current->HasEnvironment()) {
        // Catch stack maps are dealt with later on in `RecordCatchBlockInfo`.
        if (block->IsCatchBlock() && block->GetFirstInstruction() == current) {
          DCHECK(current->IsNop());
          continue;
        }

        // Create a stack map for HNop or any instruction that calls native code.
        // Note that we need a correct mapping for the native PC of the call instruction,
        // so the runtime's stack map is not sufficient since it is at the PC after the call.
        MaybeRecordNativeDebugInfo(current, block->GetDexPc());
      }
      DisassemblyScope disassembly_scope(current, *this);
      DCHECK(CheckTypeConsistency(current));
      current->Accept(instruction_visitor);
    }
  }

  GenerateSlowPaths();

  // Emit catch stack maps at the end of the stack map stream as expected by the
  // runtime exception handler.
  if (graph_->HasTryCatch()) {
    RecordCatchBlockInfo();
  }

  // Finalize instructions in the assembler.
  Finalize();

  GetStackMapStream()->EndMethod(GetAssembler()->CodeSize());
}

void CodeGenerator::Finalize() {
  GetAssembler()->FinalizeCode();
}

void CodeGenerator::EmitLinkerPatches(
    [[maybe_unused]] ArenaVector<linker::LinkerPatch>* linker_patches) {
  // No linker patches by default.
}

bool CodeGenerator::NeedsThunkCode([[maybe_unused]] const linker::LinkerPatch& patch) const {
  // Code generators that create patches requiring thunk compilation should override this function.
  return false;
}

void CodeGenerator::EmitThunkCode([[maybe_unused]] const linker::LinkerPatch& patch,
                                  [[maybe_unused]] /*out*/ ArenaVector<uint8_t>* code,
                                  [[maybe_unused]] /*out*/ std::string* debug_name) {
  // Code generators that create patches requiring thunk compilation should override this function.
  LOG(FATAL) << "Unexpected call to EmitThunkCode().";
}

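// Compute the frame size and the layout of its components: from lower to
// higher addresses, the outgoing arguments and spill slots, the area reserved
// for register spills around slow-path calls, the optional ShouldDeoptimize
// flag, and the callee-save register spills. Simple leaf methods may get an
// empty frame.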
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
                                             size_t maximum_safepoint_spill_size,
                                             size_t number_of_out_slots,
                                             const ArenaVector<HBasicBlock*>& block_order) {
  block_order_ = &block_order;
  DCHECK(!block_order.empty());
  DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
  ComputeSpillMask();
  first_register_slot_in_slow_path_ = RoundUp(
      (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());

  if (number_of_spill_slots == 0
      && !HasAllocatedCalleeSaveRegisters()
      && IsLeafMethod()
      && !RequiresCurrentMethod()) {
    DCHECK_EQ(maximum_safepoint_spill_size, 0u);
    SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
  } else {
    SetFrameSize(RoundUp(
        first_register_slot_in_slow_path_
            + maximum_safepoint_spill_size
            + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
            + FrameEntrySpillSize(),
        kStackAlignment));
  }
}

void CodeGenerator::CreateCommonInvokeLocationSummary(
    HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnMainOnly);

  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
    HInstruction* input = invoke->InputAt(i);
    locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
  }

  locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));

  if (invoke->IsInvokeStaticOrDirect()) {
    HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
    MethodLoadKind method_load_kind = call->GetMethodLoadKind();
    CodePtrLocation code_ptr_location = call->GetCodePtrLocation();
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative) {
      locations->AddTemp(Location::RequiresRegister());  // For target method.
    }
    if (code_ptr_location == CodePtrLocation::kCallCriticalNative ||
        method_load_kind == MethodLoadKind::kRecursive) {
      // For `kCallCriticalNative` we need the current method as the hidden argument
      // if we reach the dlsym lookup stub for @CriticalNative.
      locations->SetInAt(call->GetCurrentMethodIndex(), visitor->GetMethodLocation());
    } else {
      locations->AddTemp(visitor->GetMethodLocation());
      if (method_load_kind == MethodLoadKind::kRuntimeCall) {
        locations->SetInAt(call->GetCurrentMethodIndex(), Location::RequiresRegister());
      }
    }
  } else if (!invoke->IsInvokePolymorphic()) {
    locations->AddTemp(visitor->GetMethodLocation());
  }
}

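// Collect the moves needed to shuffle the arguments of a @CriticalNative call
// into the native calling convention. Only stack arguments are added to the
// parallel move; register arguments must already be in their final locations.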
void CodeGenerator::PrepareCriticalNativeArgumentMoves(
    HInvokeStaticOrDirect* invoke,
    /*inout*/InvokeDexCallingConventionVisitor* visitor,
    /*out*/HParallelMove* parallel_move) {
  LocationSummary* locations = invoke->GetLocations();
  for (size_t i = 0, num = invoke->GetNumberOfArguments(); i != num; ++i) {
    Location in_location = locations->InAt(i);
    DataType::Type type = invoke->InputAt(i)->GetType();
    DCHECK_NE(type, DataType::Type::kReference);
    Location out_location = visitor->GetNextLocation(type);
    if (out_location.IsStackSlot() || out_location.IsDoubleStackSlot()) {
      // Stack arguments will need to be moved after adjusting the SP.
      parallel_move->AddMove(in_location, out_location, type, /*instruction=*/ nullptr);
    } else {
      // Register arguments should have been assigned their final locations for register allocation.
      DCHECK(out_location.Equals(in_location)) << in_location << " -> " << out_location;
    }
  }
}

void CodeGenerator::FinishCriticalNativeFrameSetup(size_t out_frame_size,
                                                   /*inout*/HParallelMove* parallel_move) {
  DCHECK_NE(out_frame_size, 0u);
  IncreaseFrame(out_frame_size);
  // Adjust the source stack offsets by `out_frame_size`, i.e. the additional
  // frame size needed for outgoing stack arguments.
  for (size_t i = 0, num = parallel_move->NumMoves(); i != num; ++i) {
    MoveOperands* operands = parallel_move->MoveOperandsAt(i);
    Location source = operands->GetSource();
    if (operands->GetSource().IsStackSlot()) {
      operands->SetSource(Location::StackSlot(source.GetStackIndex() + out_frame_size));
    } else if (operands->GetSource().IsDoubleStackSlot()) {
      operands->SetSource(Location::DoubleStackSlot(source.GetStackIndex() + out_frame_size));
    }
  }
  // Emit the moves.
  GetMoveResolver()->EmitNativeCode(parallel_move);
}

std::string_view CodeGenerator::GetCriticalNativeShorty(HInvokeStaticOrDirect* invoke) {
  ScopedObjectAccess soa(Thread::Current());
  DCHECK(invoke->GetResolvedMethod()->IsCriticalNative());
  return invoke->GetResolvedMethod()->GetShortyView();
}

void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(temp, method_reference.index);

  // The access check is unnecessary but we do not want to introduce
  // extra entrypoints for the codegens that do not support some
  // invoke type and fall back to the runtime call.

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kVirtual:
    case kInterface:
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }

  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
  MethodReference method_reference(invoke->GetMethodReference());
  MoveConstant(invoke->GetLocations()->GetTemp(0), method_reference.index);

  // Initialize to anything to silence compiler warnings.
  QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
  switch (invoke->GetInvokeType()) {
    case kStatic:
      entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
      break;
    case kDirect:
      entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
      break;
    case kVirtual:
      entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
      break;
    case kSuper:
      entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
      break;
    case kInterface:
      entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
      break;
    case kPolymorphic:
    case kCustom:
      LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
      UNREACHABLE();
  }
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke,
                                                  SlowPathCode* slow_path) {
  // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
  // method index) since it requires multiple pieces of information from the instruction
  // (registers A, B, H). Not using the reservation has no effect on the registers used in the
  // runtime call.
  QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), slow_path);
}

void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
  MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
  QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
  InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}

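// The StringBuilder append intrinsic passes its arguments on the stack, laid
// out as for a managed call starting after the ArtMethod* slot; the packed
// `format` word encodes the type of each argument using
// `StringBuilderAppend::kBitsPerArg` bits per argument.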
void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
                                                       Location out) {
  ArenaAllocator* allocator = GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
  locations->SetOut(out);
  instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
                                       Location::ConstantLocation(instruction->GetFormat()));

  uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
  uint32_t f = format;
  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
  size_t stack_offset = static_cast<size_t>(pointer_size);  // Start after the ArtMethod*.
  for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
    StringBuilderAppend::Argument arg_type =
        static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
    switch (arg_type) {
      case StringBuilderAppend::Argument::kStringBuilder:
      case StringBuilderAppend::Argument::kString:
      case StringBuilderAppend::Argument::kCharArray:
        static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
        FALLTHROUGH_INTENDED;
      case StringBuilderAppend::Argument::kBoolean:
      case StringBuilderAppend::Argument::kChar:
      case StringBuilderAppend::Argument::kInt:
      case StringBuilderAppend::Argument::kFloat:
        locations->SetInAt(i, Location::StackSlot(stack_offset));
        break;
      case StringBuilderAppend::Argument::kLong:
      case StringBuilderAppend::Argument::kDouble:
        stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
        locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
        // Skip the low word, let the common code skip the high word.
        stack_offset += sizeof(uint32_t);
        break;
      default:
        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
                   << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
        UNREACHABLE();
    }
    f >>= StringBuilderAppend::kBitsPerArg;
    stack_offset += sizeof(uint32_t);
  }
  DCHECK_EQ(f, 0u);

  size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
  DCHECK_ALIGNED(param_size, kVRegSize);
  size_t num_vregs = param_size / kVRegSize;
  graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
}

void CodeGenerator::CreateUnresolvedFieldLocationSummary(
    HInstruction* field_access,
    DataType::Type field_type,
    const FieldAccessCallingConvention& calling_convention) {
  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations =
      new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);

  locations->AddTemp(calling_convention.GetFieldIndexLocation());

  if (is_instance) {
    // Add the `this` object for instance field accesses.
    locations->SetInAt(0, calling_convention.GetObjectLocation());
  }

  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
  // regardless of the type. Because of that we are forced to special-case
  // the access to floating point values.
  if (is_get) {
    if (DataType::IsFloatingPointType(field_type)) {
      // The return value will be stored in regular registers while the register
      // allocator expects it in a floating point register.
      // Note: We don't need to request additional temps because the return
      // register(s) are already blocked due to the call and they may overlap with
      // the input or field index.
      // The transfer between the two will be done at codegen level.
      locations->SetOut(calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetOut(calling_convention.GetReturnLocation(field_type));
    }
  } else {
    size_t set_index = is_instance ? 1 : 0;
    if (DataType::IsFloatingPointType(field_type)) {
      // The set value comes from a float location while the calling convention
      // expects it in a regular register location. Allocate a temp for it and
      // make the transfer at codegen.
      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
    } else {
      locations->SetInAt(set_index,
                         calling_convention.GetSetValueLocation(field_type, is_instance));
    }
  }
}

void CodeGenerator::GenerateUnresolvedFieldAccess(
    HInstruction* field_access,
    DataType::Type field_type,
    uint32_t field_index,
    uint32_t dex_pc,
    const FieldAccessCallingConvention& calling_convention) {
  LocationSummary* locations = field_access->GetLocations();

  MoveConstant(locations->GetTemp(0), field_index);

  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedInstanceFieldSet();
  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
      || field_access->IsUnresolvedStaticFieldGet();

  if (!is_get && DataType::IsFloatingPointType(field_type)) {
    // Copy the float value to be set into the calling convention register.
    // Note that using the temp location directly is problematic as we don't
    // support temp register pairs. To avoid boilerplate conversion code, use
    // the location from the calling convention.
    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
                 locations->InAt(is_instance ? 1 : 0),
                 (DataType::Is64BitType(field_type) ? DataType::Type::kInt64
                                                    : DataType::Type::kInt32));
  }

  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
  switch (field_type) {
    case DataType::Type::kBool:
      entrypoint = is_instance
          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt8:
      entrypoint = is_instance
          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
      break;
    case DataType::Type::kInt16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
      break;
    case DataType::Type::kUint16:
      entrypoint = is_instance
          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
      break;
    case DataType::Type::kInt32:
    case DataType::Type::kFloat32:
      entrypoint = is_instance
          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
          : (is_get ? kQuickGet32Static : kQuickSet32Static);
      break;
    case DataType::Type::kReference:
      entrypoint = is_instance
          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
      break;
    case DataType::Type::kInt64:
    case DataType::Type::kFloat64:
      entrypoint = is_instance
          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
          : (is_get ? kQuickGet64Static : kQuickSet64Static);
      break;
    default:
      LOG(FATAL) << "Invalid type " << field_type;
  }
  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);

  if (is_get && DataType::IsFloatingPointType(field_type)) {
    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
  }
}

void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
                                                              Location runtime_type_index_location,
                                                              Location runtime_return_location) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK_EQ(cls->InputCount(), 1u);
  LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
      cls, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_type_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
  DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kRuntimeCall);
  DCHECK(!cls->MustGenerateClinitCheck());
  LocationSummary* locations = cls->GetLocations();
  MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
  if (cls->NeedsAccessCheck()) {
    CheckEntrypointTypes<kQuickResolveTypeAndVerifyAccess, void*, uint32_t>();
    InvokeRuntime(kQuickResolveTypeAndVerifyAccess, cls, cls->GetDexPc());
  } else {
    CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
    InvokeRuntime(kQuickResolveType, cls, cls->GetDexPc());
  }
}

void CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(
    HLoadMethodHandle* method_handle,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_handle->InputCount(), 1u);
  LocationSummary* locations =
      new (method_handle->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_handle, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodHandleRuntimeCall(HLoadMethodHandle* method_handle) {
  LocationSummary* locations = method_handle->GetLocations();
  MoveConstant(locations->GetTemp(0), method_handle->GetMethodHandleIndex());
  CheckEntrypointTypes<kQuickResolveMethodHandle, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodHandle, method_handle, method_handle->GetDexPc());
}

void CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(
    HLoadMethodType* method_type,
    Location runtime_proto_index_location,
    Location runtime_return_location) {
  DCHECK_EQ(method_type->InputCount(), 1u);
  LocationSummary* locations =
      new (method_type->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
          method_type, LocationSummary::kCallOnMainOnly);
  locations->SetInAt(0, Location::NoLocation());
  locations->AddTemp(runtime_proto_index_location);
  locations->SetOut(runtime_return_location);
}

void CodeGenerator::GenerateLoadMethodTypeRuntimeCall(HLoadMethodType* method_type) {
  LocationSummary* locations = method_type->GetLocations();
  MoveConstant(locations->GetTemp(0), method_type->GetProtoIndex().index_);
  CheckEntrypointTypes<kQuickResolveMethodType, void*, uint32_t>();
  InvokeRuntime(kQuickResolveMethodType, method_type, method_type->GetDexPc());
}

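// Compute the offset of `object` from the beginning of the boot image,
// checking (in debug builds) that the object lives in the expected image
// section of one of the boot image spaces.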
static uint32_t GetBootImageOffsetImpl(const void* object, ImageHeader::ImageSections section) {
  Runtime* runtime = Runtime::Current();
  const std::vector<gc::space::ImageSpace*>& boot_image_spaces =
      runtime->GetHeap()->GetBootImageSpaces();
  // Check that the `object` is in the expected section of one of the boot image files.
  DCHECK(std::any_of(boot_image_spaces.begin(),
                     boot_image_spaces.end(),
                     [object, section](gc::space::ImageSpace* space) {
                       uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
                       uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
                       return space->GetImageHeader().GetImageSection(section).Contains(offset);
                     }));
  uintptr_t begin = reinterpret_cast<uintptr_t>(boot_image_spaces.front()->Begin());
  uintptr_t offset = reinterpret_cast<uintptr_t>(object) - begin;
  return dchecked_integral_cast<uint32_t>(offset);
}

uint32_t CodeGenerator::GetBootImageOffset(ObjPtr<mirror::Object> object) {
  return GetBootImageOffsetImpl(object.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadClass* load_class) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::Class> klass = load_class->GetClass().Get();
  DCHECK(klass != nullptr);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image strings are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(HLoadString* load_string) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kBootImageRelRo);
  ObjPtr<mirror::String> string = load_string->GetString().Get();
  DCHECK(string != nullptr);
  return GetBootImageOffsetImpl(string.Ptr(), ImageHeader::kSectionObjects);
}

uint32_t CodeGenerator::GetBootImageOffset(HInvoke* invoke) {
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  return GetBootImageOffsetImpl(method, ImageHeader::kSectionArtMethods);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image objects are non-moveable.
uint32_t CodeGenerator::GetBootImageOffset(ClassRoot class_root) NO_THREAD_SAFETY_ANALYSIS {
  ObjPtr<mirror::Class> klass = GetClassRoot<kWithoutReadBarrier>(class_root);
  return GetBootImageOffsetImpl(klass.Ptr(), ImageHeader::kSectionObjects);
}

// NO_THREAD_SAFETY_ANALYSIS: Avoid taking the mutator lock, boot image classes are non-moveable.
uint32_t CodeGenerator::GetBootImageOffsetOfIntrinsicDeclaringClass(HInvoke* invoke)
    NO_THREAD_SAFETY_ANALYSIS {
  DCHECK_NE(invoke->GetIntrinsic(), Intrinsics::kNone);
  ArtMethod* method = invoke->GetResolvedMethod();
  DCHECK(method != nullptr);
  ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass<kWithoutReadBarrier>();
  return GetBootImageOffsetImpl(declaring_class.Ptr(), ImageHeader::kSectionObjects);
}

void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
  // The DCHECKs below check that a register is not specified twice in
  // the summary. The out location can overlap with an input, so we need
  // to special case it.
  if (location.IsRegister()) {
    DCHECK(is_out || !blocked_core_registers_[location.reg()]);
    blocked_core_registers_[location.reg()] = true;
  } else if (location.IsFpuRegister()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
    blocked_fpu_registers_[location.reg()] = true;
  } else if (location.IsFpuRegisterPair()) {
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
    blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
  } else if (location.IsRegisterPair()) {
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
    blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
    DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
    blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
  }
}

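// Build the LocationSummary for `instruction` and its environments, and update
// method-wide state: a summary that may call into the runtime makes the method
// non-leaf, and some instructions require the current ArtMethod in the frame.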
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
  for (HEnvironment* env = instruction->GetEnvironment(); env != nullptr; env = env->GetParent()) {
    env->AllocateLocations();
  }
  instruction->Accept(GetLocationBuilder());
  DCHECK(CheckTypeConsistency(instruction));
  LocationSummary* locations = instruction->GetLocations();
  if (!instruction->IsSuspendCheckEntry()) {
    if (locations != nullptr) {
      if (locations->CanCall()) {
        MarkNotLeaf();
        if (locations->NeedsSuspendCheckEntry()) {
          MarkNeedsSuspendCheckEntry();
        }
      } else if (locations->Intrinsified() &&
                 instruction->IsInvokeStaticOrDirect() &&
                 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
        // A static method call that has been fully intrinsified, and cannot call on the slow
        // path or refer to the current method directly, no longer needs the current method.
        return;
      }
    }
    if (instruction->NeedsCurrentMethod()) {
      SetRequiresCurrentMethod();
    }
  }
}

std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
                                                     const CompilerOptions& compiler_options,
                                                     OptimizingCompilerStats* stats) {
  ArenaAllocator* allocator = graph->GetAllocator();
  switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
    case InstructionSet::kArm:
    case InstructionSet::kThumb2: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
    case InstructionSet::kArm64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_riscv64
    case InstructionSet::kRiscv64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) riscv64::CodeGeneratorRISCV64(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86
    case InstructionSet::kX86: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
    }
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
    case InstructionSet::kX86_64: {
      return std::unique_ptr<CodeGenerator>(
          new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
    }
#endif
    default:
      UNUSED(allocator);
      UNUSED(graph);
      UNUSED(stats);
      return nullptr;
  }
}

CodeGenerator::CodeGenerator(HGraph* graph,
                             size_t number_of_core_registers,
                             size_t number_of_fpu_registers,
                             size_t number_of_register_pairs,
                             uint32_t core_callee_save_mask,
                             uint32_t fpu_callee_save_mask,
                             const CompilerOptions& compiler_options,
                             OptimizingCompilerStats* stats,
                             const art::ArrayRef<const bool>& unimplemented_intrinsics)
    : frame_size_(0),
      core_spill_mask_(0),
      fpu_spill_mask_(0),
      first_register_slot_in_slow_path_(0),
      allocated_registers_(RegisterSet::Empty()),
      blocked_core_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_core_registers,
                                                                      kArenaAllocCodeGenerator)),
      blocked_fpu_registers_(graph->GetAllocator()->AllocArray<bool>(number_of_fpu_registers,
                                                                     kArenaAllocCodeGenerator)),
      number_of_core_registers_(number_of_core_registers),
      number_of_fpu_registers_(number_of_fpu_registers),
      number_of_register_pairs_(number_of_register_pairs),
      core_callee_save_mask_(core_callee_save_mask),
      fpu_callee_save_mask_(fpu_callee_save_mask),
      block_order_(nullptr),
      disasm_info_(nullptr),
      stats_(stats),
      graph_(graph),
      compiler_options_(compiler_options),
      current_slow_path_(nullptr),
      current_block_index_(0),
      is_leaf_(true),
      needs_suspend_check_entry_(false),
      requires_current_method_(false),
      code_generation_data_(),
      unimplemented_intrinsics_(unimplemented_intrinsics) {
  if (GetGraph()->IsCompilingOsr()) {
    // Make OSR methods have all registers spilled; this simplifies the logic of
    // jumping to the compiled code directly.
    for (size_t i = 0; i < number_of_core_registers_; ++i) {
      if (IsCoreCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::RegisterLocation(i));
      }
    }
    for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
      if (IsFloatingPointCalleeSaveRegister(i)) {
        AddAllocatedRegister(Location::FpuRegisterLocation(i));
      }
    }
  }
  if (GetGraph()->IsCompilingBaseline()) {
    // We need the current method in case we reach the hotness threshold. As a
    // side effect this makes the frame non-empty.
    SetRequiresCurrentMethod();
  }
}

CodeGenerator::~CodeGenerator() {}

size_t CodeGenerator::GetNumberOfJitRoots() const {
  DCHECK(code_generation_data_ != nullptr);
  return code_generation_data_->GetNumberOfJitRoots();
}

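// Debug helper: for each loop header suspend check whose dex_pc matches
// `dex_pc`, bump its counter in `covered`; for OSR compilations, also check
// that an OSR stack map exists at that dex_pc.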
static void CheckCovers(uint32_t dex_pc,
                        const HGraph& graph,
                        const CodeInfo& code_info,
                        const ArenaVector<HSuspendCheck*>& loop_headers,
                        ArenaVector<size_t>* covered) {
  for (size_t i = 0; i < loop_headers.size(); ++i) {
    if (loop_headers[i]->GetDexPc() == dex_pc) {
      if (graph.IsCompilingOsr()) {
        DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc).IsValid());
      }
      ++(*covered)[i];
    }
  }
}

// Debug helper to ensure loop entries in compiled code are matched by
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
                                            const CodeInfo& code_info,
                                            const dex::CodeItem& code_item) {
  if (graph.HasTryCatch()) {
    // One can write loops through try/catch, which we do not support for OSR anyway.
    return;
  }
  ArenaVector<HSuspendCheck*> loop_headers(graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (HBasicBlock* block : graph.GetReversePostOrder()) {
    if (block->IsLoopHeader()) {
      HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
      if (suspend_check != nullptr && !suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
        loop_headers.push_back(suspend_check);
      }
    }
  }
  ArenaVector<size_t> covered(
      loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
  for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
                                                                      &code_item)) {
    const uint32_t dex_pc = pair.DexPc();
    const Instruction& instruction = pair.Inst();
    if (instruction.IsBranch()) {
      uint32_t target = dex_pc + instruction.GetTargetOffset();
      CheckCovers(target, graph, code_info, loop_headers, &covered);
    } else if (instruction.IsSwitch()) {
      DexSwitchTable table(instruction, dex_pc);
      uint16_t num_entries = table.GetNumEntries();
      size_t offset = table.GetFirstValueIndex();

      // Use a larger loop counter type to avoid overflow issues.
      for (size_t i = 0; i < num_entries; ++i) {
        // The target of the case.
        uint32_t target = dex_pc + table.GetEntryAt(i + offset);
        CheckCovers(target, graph, code_info, loop_headers, &covered);
      }
    }
  }

  for (size_t i = 0; i < covered.size(); ++i) {
    DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
  }
}

ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
  ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
  if (kIsDebugBuild && code_item != nullptr) {
    CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
  }
  return stack_map;
}

// Returns whether stack map dex register info is needed for the instruction.
//
// The following cases mandate having a dex register map:
// * Deoptimization
//   when we need to obtain the values to restore actual vregisters for the interpreter.
// * Debuggability
//   when we want to observe the values / asynchronously deoptimize.
// * Monitor operations
//   to allow dumping locked dex registers in a stack trace for non-debuggable code.
// * On-stack-replacement (OSR)
//   when entering code compiled for OSR from the interpreter, we need to initialize the
//   compiled code's values with the values from the vregisters.
// * Method local catch blocks
//   a catch block must see the environment of the instruction from the same method that can
//   throw to this block.
static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
  HGraph* graph = instruction->GetBlock()->GetGraph();
  return instruction->IsDeoptimize() ||
         graph->IsDebuggable() ||
         graph->HasMonitorOperations() ||
         osr ||
         instruction->CanThrowIntoCatchBlock();
}

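// Record a stack map for `instruction` at the given dex PC, using the
// assembler's current code position as the native PC.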
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
}

void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                 uint32_t dex_pc,
                                 uint32_t native_pc,
                                 SlowPathCode* slow_path,
                                 bool native_debug_info) {
  if (instruction != nullptr) {
    // The code generated for some type conversions
    // may call the runtime, thus normally requiring a subsequent
    // call to this method. However, the method verifier does not
    // produce PC information for certain instructions, which are
    // considered "atomic" (they cannot join a GC).
    // Therefore we do not currently record PC information for such
    // instructions. As this may change later, we added this special
    // case so that code generators may nevertheless call
    // CodeGenerator::RecordPcInfo without triggering an error in
    // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
    // thereafter.
    if (instruction->IsTypeConversion()) {
      return;
    }
    if (instruction->IsRem()) {
      DataType::Type type = instruction->AsRem()->GetResultType();
      if ((type == DataType::Type::kFloat32) || (type == DataType::Type::kFloat64)) {
        return;
      }
    }
  }

  StackMapStream* stack_map_stream = GetStackMapStream();
  if (instruction == nullptr) {
    // For stack overflow checks and native-debug-info entries without dex register
    // mapping (i.e. start of basic block or start of slow path).
    stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
    stack_map_stream->EndStackMapEntry();
    return;
  }

  LocationSummary* locations = instruction->GetLocations();
  uint32_t register_mask = locations->GetRegisterMask();
  DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
  if (locations->OnlyCallsOnSlowPath()) {
    // In case of slow path, we currently set the location of caller-save registers
    // to register (instead of their stack location when pushed before the slow-path
    // call). Therefore register_mask contains both callee-save and caller-save
    // registers that hold objects. We must remove the spilled caller-save registers
    // from the mask, since they will be overwritten by the callee.
    uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
    register_mask &= ~spills;
  } else {
    // The register mask must be a subset of callee-save registers.
    DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
  }

  uint32_t outer_dex_pc = dex_pc;
  uint32_t inlining_depth = 0;
  HEnvironment* const environment = instruction->GetEnvironment();
  if (environment != nullptr) {
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      ++inlining_depth;
    }
    outer_dex_pc = outer_environment->GetDexPc();
  }

  HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
  bool osr =
      instruction->IsSuspendCheck() &&
      (info != nullptr) &&
      graph_->IsCompilingOsr() &&
      (inlining_depth == 0);
  StackMap::Kind kind = native_debug_info
      ? StackMap::Kind::Debug
      : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
  bool needs_vreg_info = NeedsVregInfo(instruction, osr);
  stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                       native_pc,
                                       register_mask,
                                       locations->GetStackMask(),
                                       kind,
                                       needs_vreg_info);

  EmitEnvironment(environment, slow_path, needs_vreg_info);
  stack_map_stream->EndStackMapEntry();

  if (osr) {
    DCHECK_EQ(info->GetSuspendCheck(), instruction);
    DCHECK(info->IsIrreducible());
    DCHECK(environment != nullptr);
    if (kIsDebugBuild) {
      for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
        HInstruction* in_environment = environment->GetInstructionAt(i);
        if (in_environment != nullptr) {
          DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
          Location location = environment->GetLocationAt(i);
          DCHECK(location.IsStackSlot() ||
                 location.IsDoubleStackSlot() ||
                 location.IsConstant() ||
                 location.IsInvalid());
          if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
            DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
          }
        }
      }
    }
  }
}

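// Return whether the most recently emitted stack map is at the assembler's
// current code position.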
HasStackMapAtCurrentPc()1245 bool CodeGenerator::HasStackMapAtCurrentPc() {
1246 uint32_t pc = GetAssembler()->CodeSize();
1247 StackMapStream* stack_map_stream = GetStackMapStream();
1248 size_t count = stack_map_stream->GetNumberOfStackMaps();
1249 if (count == 0) {
1250 return false;
1251 }
1252 return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
1253 }
1254
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                               uint32_t dex_pc,
                                               SlowPathCode* slow_path) {
  if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
    if (HasStackMapAtCurrentPc()) {
      // Ensure that we do not collide with the stack map of the previous instruction.
      GenerateNop();
    }
    RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
  }
}

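// Records a Catch-kind stack map for each catch block. A sketch of what one
// entry holds, based on the calls below: the outermost dex pc of the catch
// block's environment chain, the block's native entry address, no live
// registers, and a dex register map built only from the catch phis (see
// EmitVRegInfoOnlyCatchPhis).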
void CodeGenerator::RecordCatchBlockInfo() {
  StackMapStream* stack_map_stream = GetStackMapStream();

  for (HBasicBlock* block : *block_order_) {
    if (!block->IsCatchBlock()) {
      continue;
    }

    // Get the outer dex_pc. In debug builds, we also save the full dex_pc list
    // for DCHECK purposes.
    std::vector<uint32_t> dex_pc_list_for_verification;
    if (kIsDebugBuild) {
      dex_pc_list_for_verification.push_back(block->GetDexPc());
    }
    DCHECK(block->GetFirstInstruction()->IsNop());
    DCHECK(block->GetFirstInstruction()->AsNop()->NeedsEnvironment());
    HEnvironment* const environment = block->GetFirstInstruction()->GetEnvironment();
    DCHECK(environment != nullptr);
    HEnvironment* outer_environment = environment;
    while (outer_environment->GetParent() != nullptr) {
      outer_environment = outer_environment->GetParent();
      if (kIsDebugBuild) {
        dex_pc_list_for_verification.push_back(outer_environment->GetDexPc());
      }
    }

    if (kIsDebugBuild) {
      // dex_pc_list_for_verification is filled from innermost to outermost. Reverse it,
      // since we are expected to pass the list from outermost to innermost.
      std::reverse(dex_pc_list_for_verification.begin(), dex_pc_list_for_verification.end());
      DCHECK_EQ(dex_pc_list_for_verification.front(), outer_environment->GetDexPc());
    }

    uint32_t native_pc = GetAddressOf(block);
    stack_map_stream->BeginStackMapEntry(outer_environment->GetDexPc(),
                                         native_pc,
                                         /* register_mask= */ 0,
                                         /* sp_mask= */ nullptr,
                                         StackMap::Kind::Catch,
                                         /* needs_vreg_info= */ true,
                                         dex_pc_list_for_verification);

    EmitEnvironment(environment,
                    /* slow_path= */ nullptr,
                    /* needs_vreg_info= */ true,
                    /* is_for_catch_handler= */ true);

    stack_map_stream->EndStackMapEntry();
  }
}

void CodeGenerator::AddSlowPath(SlowPathCode* slow_path) {
  DCHECK(code_generation_data_ != nullptr);
  code_generation_data_->AddSlowPath(slow_path);
}

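// Emits one dex register entry per vreg of `environment`. A sketch of the
// encoding, assuming an example environment that holds the long constant
// 0x0000000100000002 in vregs 0-1 and an int in core register 5 in vreg 2
// (hypothetical values):
//   vreg 0: Kind::kConstant, 0x00000002   // Low32Bits(value)
//   vreg 1: Kind::kConstant, 0x00000001   // High32Bits(value)
//   vreg 2: Kind::kInRegister, 5
// 64-bit values always occupy two consecutive vregs, which is why the
// constant and register cases below advance `i` an extra time.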
void CodeGenerator::EmitVRegInfo(HEnvironment* environment,
                                 SlowPathCode* slow_path,
                                 bool is_for_catch_handler) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  // Walk over the environment, and record the location of dex registers.
  for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
    HInstruction* current = environment->GetInstructionAt(i);
    if (current == nullptr) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
      continue;
    }

    using Kind = DexRegisterLocation::Kind;
    Location location = environment->GetLocationAt(i);
    switch (location.GetKind()) {
      case Location::kConstant: {
        DCHECK_EQ(current, location.GetConstant());
        if (current->IsLongConstant()) {
          int64_t value = current->AsLongConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsDoubleConstant()) {
          int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
          ++i;
          DCHECK_LT(i, environment_size);
        } else if (current->IsIntConstant()) {
          int32_t value = current->AsIntConstant()->GetValue();
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        } else if (current->IsNullConstant()) {
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
        } else {
          DCHECK(current->IsFloatConstant()) << current->DebugName();
          int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
          stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
        }
        break;
      }

      case Location::kStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        break;
      }

      case Location::kDoubleStackSlot: {
        stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
        stack_map_stream->AddDexRegisterEntry(
            Kind::kInStack, location.GetHighStackIndex(kVRegSize));
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegister: {
        DCHECK(!is_for_catch_handler);
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
          if (current->GetType() == DataType::Type::kInt64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegister: {
        DCHECK(!is_for_catch_handler);
        int id = location.reg();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
          if (current->GetType() == DataType::Type::kFloat64) {
            stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
            ++i;
            DCHECK_LT(i, environment_size);
          }
        }
        break;
      }

      case Location::kFpuRegisterPair: {
        DCHECK(!is_for_catch_handler);
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
          ++i;
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
          ++i;
        }
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kRegisterPair: {
        DCHECK(!is_for_catch_handler);
        int low = location.low();
        int high = location.high();
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
        }
        if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
          uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
          stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
        } else {
          stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
        }
        ++i;
        DCHECK_LT(i, environment_size);
        break;
      }

      case Location::kInvalid: {
        stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
        break;
      }

      default:
        LOG(FATAL) << "Unexpected kind " << location.GetKind();
    }
  }
}

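// Emits vreg info for a catch handler's innermost environment using only the
// catch phis. The loop below stays linear because catch phis are sorted by
// vreg number; a sketch of one iteration: advance `current_phi` until its
// vreg number is >= `vreg`, then either record the phi's stack-slot location
// (on a match) or record Kind::kNone for a vreg with no live phi.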
void CodeGenerator::EmitVRegInfoOnlyCatchPhis(HEnvironment* environment) {
  StackMapStream* stack_map_stream = GetStackMapStream();
  DCHECK(environment->GetHolder()->GetBlock()->IsCatchBlock());
  DCHECK_EQ(environment->GetHolder()->GetBlock()->GetFirstInstruction(), environment->GetHolder());
  HInstruction* current_phi = environment->GetHolder()->GetBlock()->GetFirstPhi();
  for (size_t vreg = 0; vreg < environment->Size(); ++vreg) {
    while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
      HInstruction* next_phi = current_phi->GetNext();
      DCHECK(next_phi == nullptr ||
             current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
          << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
      current_phi = next_phi;
    }

    if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
      stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
    } else {
      Location location = current_phi->GetLocations()->Out();
      switch (location.GetKind()) {
        case Location::kStackSlot: {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetStackIndex());
          break;
        }
        case Location::kDoubleStackSlot: {
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetStackIndex());
          stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack,
                                                location.GetHighStackIndex(kVRegSize));
          ++vreg;
          DCHECK_LT(vreg, environment->Size());
          break;
        }
        default: {
          LOG(FATAL) << "All catch phis must be allocated to a stack slot. Unexpected kind "
                     << location.GetKind();
          UNREACHABLE();
        }
      }
    }
  }
}

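// Emits the environment chain for a stack map entry. A sketch of the recursion
// order, assuming outer() inlined mid() which inlined inner(): the parent is
// emitted first, so the stream sees outer()'s vregs, then an inline-info entry
// and vregs for mid(), then an inline-info entry and vregs for inner(). Only
// the innermost environment of a catch handler is restricted to catch phis.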
void CodeGenerator::EmitEnvironment(HEnvironment* environment,
                                    SlowPathCode* slow_path,
                                    bool needs_vreg_info,
                                    bool is_for_catch_handler,
                                    bool innermost_environment) {
  if (environment == nullptr) return;

  StackMapStream* stack_map_stream = GetStackMapStream();
  bool emit_inline_info = environment->GetParent() != nullptr;

  if (emit_inline_info) {
    // We emit the parent environment first.
    EmitEnvironment(environment->GetParent(),
                    slow_path,
                    needs_vreg_info,
                    is_for_catch_handler,
                    /* innermost_environment= */ false);
    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
                                           environment->GetDexPc(),
                                           needs_vreg_info ? environment->Size() : 0,
                                           &graph_->GetDexFile(),
                                           this);
  }

  // If a dex register map is not required, we simply do not emit it.
  if (needs_vreg_info) {
    if (innermost_environment && is_for_catch_handler) {
      EmitVRegInfoOnlyCatchPhis(environment);
    } else {
      EmitVRegInfo(environment, slow_path, is_for_catch_handler);
    }
  }

  if (emit_inline_info) {
    stack_map_stream->EndInlineInfoEntry();
  }
}

bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
  return null_check->IsEmittedAtUseSite();
}

void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
  HNullCheck* null_check = instr->GetImplicitNullCheck();
  if (null_check != nullptr) {
    RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
  }
}

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
                                                                RegisterSet caller_saves) {
  // Note: Using kNoCall allows the method to be treated as leaf (and eliminates the
  // HSuspendCheck from the entry block). However, it will still get a valid stack frame
  // because the HNullCheck needs an environment.
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  // When throwing from a try block, we may need to retrieve dalvik registers from
  // physical registers, and we also need to set up the stack mask for GC. This is
  // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
  bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
  if (can_throw_into_catch_block) {
    call_kind = LocationSummary::kCallOnSlowPath;
  }
  LocationSummary* locations =
      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
  if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);  // Default: no caller-save registers.
  }
  DCHECK(!instruction->HasUses());
  return locations;
}

void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
  if (compiler_options_.GetImplicitNullChecks()) {
    MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
    GenerateImplicitNullCheck(instruction);
  } else {
    MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
    GenerateExplicitNullCheck(instruction);
  }
}

void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check,
                                                          HParallelMove* spills) const {
  LocationSummary* locations = suspend_check->GetLocations();
  HBasicBlock* block = suspend_check->GetBlock();
  DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
  DCHECK(block->IsLoopHeader());
  DCHECK(block->GetFirstInstruction() == spills);

  for (size_t i = 0, num_moves = spills->NumMoves(); i != num_moves; ++i) {
    Location dest = spills->MoveOperandsAt(i)->GetDestination();
    // All parallel moves in loop headers are spills.
    DCHECK(dest.IsStackSlot() || dest.IsDoubleStackSlot() || dest.IsSIMDStackSlot()) << dest;
    // Clear the stack bit marking a reference. Do not bother to check whether the spill
    // is actually a reference spill; clearing bits that are already zero is harmless.
    locations->ClearStackBit(dest.GetStackIndex() / kVRegSize);
  }
}

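// Resolves two moves that must happen "at once" through the parallel move
// resolver, which handles overlapping sources and destinations. A usage
// sketch with hypothetical stack indices, exchanging two int slots:
//   EmitParallelMoves(Location::StackSlot(0), Location::StackSlot(4),
//                     DataType::Type::kInt32,
//                     Location::StackSlot(4), Location::StackSlot(0),
//                     DataType::Type::kInt32);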
void CodeGenerator::EmitParallelMoves(Location from1,
                                      Location to1,
                                      DataType::Type type1,
                                      Location from2,
                                      Location to2,
                                      DataType::Type type2) {
  HParallelMove parallel_move(GetGraph()->GetAllocator());
  parallel_move.AddMove(from1, to1, type1, nullptr);
  parallel_move.AddMove(from2, to2, type2, nullptr);
  GetMoveResolver()->EmitNativeCode(&parallel_move);
}

bool CodeGenerator::StoreNeedsWriteBarrier(DataType::Type type,
                                           HInstruction* value,
                                           WriteBarrierKind write_barrier_kind) const {
  // Check that a null value is not represented as an integer constant.
  DCHECK_IMPLIES(type == DataType::Type::kReference, !value->IsIntConstant());
  // Branch profiling currently doesn't support running optimizations.
  return (GetGraph()->IsCompilingBaseline() && compiler_options_.ProfileBranches())
      ? CodeGenerator::StoreNeedsWriteBarrier(type, value)
      : write_barrier_kind != WriteBarrierKind::kDontEmit;
}

void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
                                          HInstruction* instruction,
                                          SlowPathCode* slow_path) {
  // Ensure that the call kind indication given to the register allocator is
  // coherent with the runtime call generated.
  if (slow_path == nullptr) {
    DCHECK(instruction->GetLocations()->WillCall())
        << "instruction->DebugName()=" << instruction->DebugName();
  } else {
    DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
        << "instruction->DebugName()=" << instruction->DebugName()
        << " slow_path->GetDescription()=" << slow_path->GetDescription();
  }

  // Check that the GC side effect is set when required.
  // TODO: Reverse EntrypointCanTriggerGC
  if (EntrypointCanTriggerGC(entrypoint)) {
    if (slow_path == nullptr) {
      DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString();
    } else {
      // The 'CanTriggerGC' side effect is used to restrict optimization of instructions
      // which depend on GC (e.g. IntermediateAddress) - to ensure they are not alive
      // across GC points. However, if execution never returns to the compiled code from
      // a GC point, this restriction is unnecessary - in particular for fatal slow paths
      // which might trigger GC.
      DCHECK((slow_path->IsFatal() && !instruction->GetLocations()->WillCall()) ||
             instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
             // When (non-Baker) read barriers are enabled, some instructions
             // use a slow path to emit a read barrier, which does not trigger
             // GC.
             (EmitNonBakerReadBarrier() &&
              (instruction->IsInstanceFieldGet() ||
               instruction->IsStaticFieldGet() ||
               instruction->IsArrayGet() ||
               instruction->IsLoadClass() ||
               instruction->IsLoadString() ||
               instruction->IsInstanceOf() ||
               instruction->IsCheckCast() ||
               (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
          << "instruction->DebugName()=" << instruction->DebugName()
          << " instruction->GetSideEffects().ToString()="
          << instruction->GetSideEffects().ToString()
          << " slow_path->GetDescription()=" << slow_path->GetDescription() << std::endl
          << "Instruction and args: " << instruction->DumpWithArgs();
    }
  } else {
    // The GC side effect is not required for the instruction. But the instruction might
    // still have it, for example if it calls other entrypoints requiring it.
  }

  // Check the coherency of leaf information.
  DCHECK(instruction->IsSuspendCheck()
         || ((slow_path != nullptr) && slow_path->IsFatal())
         || instruction->GetLocations()->CanCall()
         || !IsLeafMethod())
      << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
}

void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
                                                                SlowPathCode* slow_path) {
  DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
  // Only the Baker read barrier marking slow path used by certain
  // instructions is expected to invoke the runtime without recording
  // PC-related information.
  DCHECK(kUseBakerReadBarrier);
  DCHECK(instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsLoadClass() ||
         instruction->IsLoadMethodType() ||
         instruction->IsLoadString() ||
         instruction->IsInstanceOf() ||
         instruction->IsCheckCast() ||
         (instruction->IsInvoke() && instruction->GetLocations()->Intrinsified()))
      << "instruction->DebugName()=" << instruction->DebugName()
      << " slow_path->GetDescription()=" << slow_path->GetDescription();
}

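// Spills the live core and FP registers below the frame's register-save area.
// A sketch of the resulting layout, assuming two live core registers r1, r2
// and one FP register s0 (hypothetical ids): starting at
// GetFirstRegisterSlotInSlowPath(), r1 is stored first, then r2, then s0,
// each advancing stack_offset by the size returned by the architecture's
// Save*Register; object-holding registers also set their stack-mask bit.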
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    // If the register holds an object, update the stack mask.
    if (locations->RegisterContainsObject(i)) {
      locations->SetStackBit(stack_offset / kVRegSize);
    }
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_core_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    saved_fpu_stack_offsets_[i] = stack_offset;
    stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
  }
}

void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
  size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();

  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
  for (uint32_t i : LowToHighBits(core_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
  }

  const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
  for (uint32_t i : LowToHighBits(fp_spills)) {
    DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
    DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
    stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
  }
}

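// Builds the LocationSummary for an intrinsified System.arraycopy, or returns
// nullptr to fall back to the plain runtime call. The bail-out cases checked
// below: a constant negative position or length, a constant length at or
// above `length_threshold` (when the threshold is positive), a self-copy with
// src_pos < dest_pos (which would require copying backward), and primitive
// source or destination arrays, which are not intrinsified here.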
LocationSummary* CodeGenerator::CreateSystemArrayCopyLocationSummary(
    HInvoke* invoke, int32_t length_threshold, size_t num_temps) {
  // Check to see if we have known failures that will cause us to have to bail out
  // to the runtime, and just generate the runtime call directly.
  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstantOrNull();
  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstantOrNull();

  // The positions must be non-negative.
  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
    // We will have to fail anyway.
    return nullptr;
  }

  // The length must be >= 0. If a positive `length_threshold` is provided, lengths
  // greater than or equal to the threshold are also handled by the normal implementation.
  HIntConstant* length = invoke->InputAt(4)->AsIntConstantOrNull();
  if (length != nullptr) {
    int32_t len = length->GetValue();
    if (len < 0 || (length_threshold > 0 && len >= length_threshold)) {
      // Just call as normal.
      return nullptr;
    }
  }

  SystemArrayCopyOptimizations optimizations(invoke);

  if (optimizations.GetDestinationIsSource()) {
    if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
      // We only support backward copying if source and destination are the same.
      return nullptr;
    }
  }

  if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
    // We currently don't intrinsify primitive copying.
    return nullptr;
  }

  ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));

  if (num_temps != 0u) {
    locations->AddRegisterTemps(num_temps);
  }
  return locations;
}

void CodeGenerator::EmitJitRoots(uint8_t* code,
                                 const uint8_t* roots_data,
                                 /*out*/ std::vector<Handle<mirror::Object>>* roots) {
  code_generation_data_->EmitJitRoots(roots);
  EmitJitRootPatches(code, roots_data);
}

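// Maps an array's component size shift to the matching allocation entrypoint.
// For illustration (shift -> element width -> entrypoint):
//   0 -> 1 byte  -> kQuickAllocArrayResolved8   (e.g. byte[], boolean[])
//   1 -> 2 bytes -> kQuickAllocArrayResolved16  (e.g. char[], short[])
//   2 -> 4 bytes -> kQuickAllocArrayResolved32  (e.g. int[], float[])
//   3 -> 8 bytes -> kQuickAllocArrayResolved64  (e.g. long[], double[])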
QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
  switch (new_array->GetComponentSizeShift()) {
    case 0: return kQuickAllocArrayResolved8;
    case 1: return kQuickAllocArrayResolved16;
    case 2: return kQuickAllocArrayResolved32;
    case 3: return kQuickAllocArrayResolved64;
  }
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
}

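// Returns the addressing-mode scale factor (log2 of the element size) used to
// index an array of `type`. For illustration: int8 elements are one byte
// apart (TIMES_1), int32/float32/reference elements four bytes apart
// (TIMES_4), and int64/float64 elements eight bytes apart (TIMES_8).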
ScaleFactor CodeGenerator::ScaleFactorForType(DataType::Type type) {
  switch (type) {
    case DataType::Type::kBool:
    case DataType::Type::kUint8:
    case DataType::Type::kInt8:
      return TIMES_1;
    case DataType::Type::kUint16:
    case DataType::Type::kInt16:
      return TIMES_2;
    case DataType::Type::kInt32:
    case DataType::Type::kUint32:
    case DataType::Type::kFloat32:
    case DataType::Type::kReference:
      return TIMES_4;
    case DataType::Type::kInt64:
    case DataType::Type::kUint64:
    case DataType::Type::kFloat64:
      return TIMES_8;
    case DataType::Type::kVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
}

}  // namespace art