1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator.h"
18
19 #ifdef ART_ENABLE_CODEGEN_arm
20 #include "code_generator_arm.h"
21 #include "code_generator_arm_vixl.h"
22 #endif
23
24 #ifdef ART_ENABLE_CODEGEN_arm64
25 #include "code_generator_arm64.h"
26 #endif
27
28 #ifdef ART_ENABLE_CODEGEN_x86
29 #include "code_generator_x86.h"
30 #endif
31
32 #ifdef ART_ENABLE_CODEGEN_x86_64
33 #include "code_generator_x86_64.h"
34 #endif
35
36 #ifdef ART_ENABLE_CODEGEN_mips
37 #include "code_generator_mips.h"
38 #endif
39
40 #ifdef ART_ENABLE_CODEGEN_mips64
41 #include "code_generator_mips64.h"
42 #endif
43
44 #include "bytecode_utils.h"
45 #include "class_linker.h"
46 #include "compiled_method.h"
47 #include "dex/verified_method.h"
48 #include "driver/compiler_driver.h"
49 #include "graph_visualizer.h"
50 #include "intern_table.h"
51 #include "intrinsics.h"
52 #include "leb128.h"
53 #include "mirror/array-inl.h"
54 #include "mirror/object_array-inl.h"
55 #include "mirror/object_reference.h"
56 #include "mirror/reference.h"
57 #include "mirror/string.h"
58 #include "parallel_move_resolver.h"
59 #include "ssa_liveness_analysis.h"
60 #include "scoped_thread_state_change-inl.h"
61 #include "thread-inl.h"
62 #include "utils/assembler.h"
63
64 namespace art {
65
66 // If true, we record the static and direct invokes in the invoke infos.
67 static constexpr bool kEnableDexLayoutOptimizations = false;
68
69 // Return whether a location is consistent with a type.
70 static bool CheckType(Primitive::Type type, Location location) {
71 if (location.IsFpuRegister()
72 || (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresFpuRegister))) {
73 return (type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble);
74 } else if (location.IsRegister() ||
75 (location.IsUnallocated() && (location.GetPolicy() == Location::kRequiresRegister))) {
76 return Primitive::IsIntegralType(type) || (type == Primitive::kPrimNot);
77 } else if (location.IsRegisterPair()) {
78 return type == Primitive::kPrimLong;
79 } else if (location.IsFpuRegisterPair()) {
80 return type == Primitive::kPrimDouble;
81 } else if (location.IsStackSlot()) {
82 return (Primitive::IsIntegralType(type) && type != Primitive::kPrimLong)
83 || (type == Primitive::kPrimFloat)
84 || (type == Primitive::kPrimNot);
85 } else if (location.IsDoubleStackSlot()) {
86 return (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
87 } else if (location.IsConstant()) {
88 if (location.GetConstant()->IsIntConstant()) {
89 return Primitive::IsIntegralType(type) && (type != Primitive::kPrimLong);
90 } else if (location.GetConstant()->IsNullConstant()) {
91 return type == Primitive::kPrimNot;
92 } else if (location.GetConstant()->IsLongConstant()) {
93 return type == Primitive::kPrimLong;
94 } else if (location.GetConstant()->IsFloatConstant()) {
95 return type == Primitive::kPrimFloat;
96 } else {
97 return location.GetConstant()->IsDoubleConstant()
98 && (type == Primitive::kPrimDouble);
99 }
100 } else {
101 return location.IsInvalid() || (location.GetPolicy() == Location::kAny);
102 }
103 }
104
105 // Check that a location summary is consistent with an instruction.
106 static bool CheckTypeConsistency(HInstruction* instruction) {
107 LocationSummary* locations = instruction->GetLocations();
108 if (locations == nullptr) {
109 return true;
110 }
111
112 if (locations->Out().IsUnallocated()
113 && (locations->Out().GetPolicy() == Location::kSameAsFirstInput)) {
114 DCHECK(CheckType(instruction->GetType(), locations->InAt(0)))
115 << instruction->GetType()
116 << " " << locations->InAt(0);
117 } else {
118 DCHECK(CheckType(instruction->GetType(), locations->Out()))
119 << instruction->GetType()
120 << " " << locations->Out();
121 }
122
123 HConstInputsRef inputs = instruction->GetInputs();
124 for (size_t i = 0; i < inputs.size(); ++i) {
125 DCHECK(CheckType(inputs[i]->GetType(), locations->InAt(i)))
126 << inputs[i]->GetType() << " " << locations->InAt(i);
127 }
128
129 HEnvironment* environment = instruction->GetEnvironment();
130 for (size_t i = 0; i < instruction->EnvironmentSize(); ++i) {
131 if (environment->GetInstructionAt(i) != nullptr) {
132 Primitive::Type type = environment->GetInstructionAt(i)->GetType();
133 DCHECK(CheckType(type, environment->GetLocationAt(i)))
134 << type << " " << environment->GetLocationAt(i);
135 } else {
136 DCHECK(environment->GetLocationAt(i).IsInvalid())
137 << environment->GetLocationAt(i);
138 }
139 }
140 return true;
141 }
142
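// Byte offset of the `index`-th GcRoot<mirror::Object> entry in a cache array.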
143 size_t CodeGenerator::GetCacheOffset(uint32_t index) {
144 return sizeof(GcRoot<mirror::Object>) * index;
145 }
146
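// Byte offset of the `index`-th pointer-sized cache entry, using the target ISA's pointer size.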
147 size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
148 auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
149 return static_cast<size_t>(pointer_size) * index;
150 }
151
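// HArrayLength is also used for String.length(); a string stores its length in
// the String::count_ field rather than in the array length field.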
152 uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) {
153 return array_length->IsStringLength()
154 ? mirror::String::CountOffset().Uint32Value()
155 : mirror::Array::LengthOffset().Uint32Value();
156 }
157
158 uint32_t CodeGenerator::GetArrayDataOffset(HArrayGet* array_get) {
159 DCHECK(array_get->GetType() == Primitive::kPrimChar || !array_get->IsStringCharAt());
160 return array_get->IsStringCharAt()
161 ? mirror::String::ValueOffset().Uint32Value()
162 : mirror::Array::DataOffset(Primitive::ComponentSize(array_get->GetType())).Uint32Value();
163 }
164
165 bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
166 DCHECK_EQ((*block_order_)[current_block_index_], current);
167 return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
168 }
169
170 HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
171 for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
172 HBasicBlock* block = (*block_order_)[i];
173 if (!block->IsSingleJump()) {
174 return block;
175 }
176 }
177 return nullptr;
178 }
179
180 HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
181 while (block->IsSingleJump()) {
182 block = block->GetSuccessors()[0];
183 }
184 return block;
185 }
186
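// RAII helper that records the native code interval covered by a single HInstruction
// when disassembly information is being collected.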
187 class DisassemblyScope {
188 public:
189 DisassemblyScope(HInstruction* instruction, const CodeGenerator& codegen)
190 : codegen_(codegen), instruction_(instruction), start_offset_(static_cast<size_t>(-1)) {
191 if (codegen_.GetDisassemblyInformation() != nullptr) {
192 start_offset_ = codegen_.GetAssembler().CodeSize();
193 }
194 }
195
196 ~DisassemblyScope() {
197 // We avoid building this data when we know it will not be used.
198 if (codegen_.GetDisassemblyInformation() != nullptr) {
199 codegen_.GetDisassemblyInformation()->AddInstructionInterval(
200 instruction_, start_offset_, codegen_.GetAssembler().CodeSize());
201 }
202 }
203
204 private:
205 const CodeGenerator& codegen_;
206 HInstruction* instruction_;
207 size_t start_offset_;
208 };
209
210
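// Emits the native code of all slow paths collected during instruction generation,
// recording native debug info and, when requested, disassembly intervals.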
211 void CodeGenerator::GenerateSlowPaths() {
212 size_t code_start = 0;
213 for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
214 SlowPathCode* slow_path = slow_path_unique_ptr.get();
215 current_slow_path_ = slow_path;
216 if (disasm_info_ != nullptr) {
217 code_start = GetAssembler()->CodeSize();
218 }
219 // Record the dex pc at start of slow path (required for java line number mapping).
220 MaybeRecordNativeDebugInfo(slow_path->GetInstruction(), slow_path->GetDexPc(), slow_path);
221 slow_path->EmitNativeCode(this);
222 if (disasm_info_ != nullptr) {
223 disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());
224 }
225 }
226 current_slow_path_ = nullptr;
227 }
228
229 void CodeGenerator::Compile(CodeAllocator* allocator) {
230 // The register allocator already called `InitializeCodeGeneration`,
231 // where the frame size has been computed.
232 DCHECK(block_order_ != nullptr);
233 Initialize();
234
235 HGraphVisitor* instruction_visitor = GetInstructionVisitor();
236 DCHECK_EQ(current_block_index_, 0u);
237
238 size_t frame_start = GetAssembler()->CodeSize();
239 GenerateFrameEntry();
240 DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
241 if (disasm_info_ != nullptr) {
242 disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
243 }
244
245 for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
246 HBasicBlock* block = (*block_order_)[current_block_index_];
247 // Don't generate code for an empty block. Its predecessors will branch to its successor
248 // directly. Also, the label of that block will not be emitted, so this helps catch
249 // errors where we reference that label.
250 if (block->IsSingleJump()) continue;
251 Bind(block);
252 // This ensures that we have correct native line mapping for all native instructions.
253 // It is necessary to make stepping over a statement work. Otherwise, any initial
254 // instructions (e.g. moves) would be assumed to be the start of the next statement.
255 MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
256 for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
257 HInstruction* current = it.Current();
258 if (current->HasEnvironment()) {
259 // Create stackmap for HNativeDebugInfo or any instruction which calls native code.
260 // Note that we need correct mapping for the native PC of the call instruction,
261 // so the runtime's stackmap is not sufficient since it is at PC after the call.
262 MaybeRecordNativeDebugInfo(current, block->GetDexPc());
263 }
264 DisassemblyScope disassembly_scope(current, *this);
265 DCHECK(CheckTypeConsistency(current));
266 current->Accept(instruction_visitor);
267 }
268 }
269
270 GenerateSlowPaths();
271
272 // Emit catch stack maps at the end of the stack map stream as expected by the
273 // runtime exception handler.
274 if (graph_->HasTryCatch()) {
275 RecordCatchBlockInfo();
276 }
277
278 // Finalize instructions in the assembler.
279 Finalize(allocator);
280 }
281
282 void CodeGenerator::Finalize(CodeAllocator* allocator) {
283 size_t code_size = GetAssembler()->CodeSize();
284 uint8_t* buffer = allocator->Allocate(code_size);
285
286 MemoryRegion code(buffer, code_size);
287 GetAssembler()->FinalizeInstructions(code);
288 }
289
290 void CodeGenerator::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches ATTRIBUTE_UNUSED) {
291 // No linker patches by default.
292 }
293
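// Computes the frame layout: outgoing arguments, spill slots, the slow-path register
// save area and the frame entry spills, rounded up to the stack alignment. A leaf
// method with no spills that does not need the current method gets an (almost) empty frame.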
294 void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
295 size_t maximum_safepoint_spill_size,
296 size_t number_of_out_slots,
297 const ArenaVector<HBasicBlock*>& block_order) {
298 block_order_ = &block_order;
299 DCHECK(!block_order.empty());
300 DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
301 ComputeSpillMask();
302 first_register_slot_in_slow_path_ = RoundUp(
303 (number_of_out_slots + number_of_spill_slots) * kVRegSize, GetPreferredSlotsAlignment());
304
305 if (number_of_spill_slots == 0
306 && !HasAllocatedCalleeSaveRegisters()
307 && IsLeafMethod()
308 && !RequiresCurrentMethod()) {
309 DCHECK_EQ(maximum_safepoint_spill_size, 0u);
310 SetFrameSize(CallPushesPC() ? GetWordSize() : 0);
311 } else {
312 SetFrameSize(RoundUp(
313 first_register_slot_in_slow_path_
314 + maximum_safepoint_spill_size
315 + (GetGraph()->HasShouldDeoptimizeFlag() ? kShouldDeoptimizeFlagSize : 0)
316 + FrameEntrySpillSize(),
317 kStackAlignment));
318 }
319 }
320
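// Builds the location summary shared by all invoke kinds: argument locations from the
// calling convention visitor, the return location, and the register or temp used to
// pass the method pointer.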
321 void CodeGenerator::CreateCommonInvokeLocationSummary(
322 HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor) {
323 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
324 LocationSummary* locations = new (allocator) LocationSummary(invoke,
325 LocationSummary::kCallOnMainOnly);
326
327 for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
328 HInstruction* input = invoke->InputAt(i);
329 locations->SetInAt(i, visitor->GetNextLocation(input->GetType()));
330 }
331
332 locations->SetOut(visitor->GetReturnLocation(invoke->GetType()));
333
334 if (invoke->IsInvokeStaticOrDirect()) {
335 HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
336 switch (call->GetMethodLoadKind()) {
337 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
338 locations->SetInAt(call->GetSpecialInputIndex(), visitor->GetMethodLocation());
339 break;
340 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
341 locations->AddTemp(visitor->GetMethodLocation());
342 locations->SetInAt(call->GetSpecialInputIndex(), Location::RequiresRegister());
343 break;
344 default:
345 locations->AddTemp(visitor->GetMethodLocation());
346 break;
347 }
348 } else {
349 locations->AddTemp(visitor->GetMethodLocation());
350 }
351 }
352
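// Unresolved invokes are routed through the runtime trampolines, which resolve the
// method and perform the access check.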
353 void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke) {
354 MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetDexMethodIndex());
355
356 // Initialize to anything to silence compiler warnings.
357 QuickEntrypointEnum entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
358 switch (invoke->GetInvokeType()) {
359 case kStatic:
360 entrypoint = kQuickInvokeStaticTrampolineWithAccessCheck;
361 break;
362 case kDirect:
363 entrypoint = kQuickInvokeDirectTrampolineWithAccessCheck;
364 break;
365 case kVirtual:
366 entrypoint = kQuickInvokeVirtualTrampolineWithAccessCheck;
367 break;
368 case kSuper:
369 entrypoint = kQuickInvokeSuperTrampolineWithAccessCheck;
370 break;
371 case kInterface:
372 entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
373 break;
374 }
375 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
376 }
377
378 void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
379 MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
380 QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
381 InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
382 }
383
384 void CodeGenerator::CreateUnresolvedFieldLocationSummary(
385 HInstruction* field_access,
386 Primitive::Type field_type,
387 const FieldAccessCallingConvention& calling_convention) {
388 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
389 || field_access->IsUnresolvedInstanceFieldSet();
390 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
391 || field_access->IsUnresolvedStaticFieldGet();
392
393 ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
394 LocationSummary* locations =
395 new (allocator) LocationSummary(field_access, LocationSummary::kCallOnMainOnly);
396
397 locations->AddTemp(calling_convention.GetFieldIndexLocation());
398
399 if (is_instance) {
400 // Add the `this` object for instance field accesses.
401 locations->SetInAt(0, calling_convention.GetObjectLocation());
402 }
403
404 // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
405 // regardless of the type. Because of that we are forced to special case
406 // the access to floating point values.
407 if (is_get) {
408 if (Primitive::IsFloatingPointType(field_type)) {
409 // The return value will be stored in regular registers while the register
410 // allocator expects it in a floating point register.
411 // Note: We don't need to request additional temps because the return
412 // register(s) are already blocked due to the call and they may overlap with
413 // the input or field index.
414 // The transfer between the two will be done at codegen level.
415 locations->SetOut(calling_convention.GetFpuLocation(field_type));
416 } else {
417 locations->SetOut(calling_convention.GetReturnLocation(field_type));
418 }
419 } else {
420 size_t set_index = is_instance ? 1 : 0;
421 if (Primitive::IsFloatingPointType(field_type)) {
422 // The set value comes from a float location while the calling convention
423 // expects it in a regular register location. Allocate a temp for it and
424 // make the transfer at codegen.
425 AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
426 locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
427 } else {
428 locations->SetInAt(set_index,
429 calling_convention.GetSetValueLocation(field_type, is_instance));
430 }
431 }
432 }
433
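// Emits the runtime call for an unresolved field access. Floating-point values are
// moved between the FP locations used by the graph and the core registers expected
// by the Quick entrypoints.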
434 void CodeGenerator::GenerateUnresolvedFieldAccess(
435 HInstruction* field_access,
436 Primitive::Type field_type,
437 uint32_t field_index,
438 uint32_t dex_pc,
439 const FieldAccessCallingConvention& calling_convention) {
440 LocationSummary* locations = field_access->GetLocations();
441
442 MoveConstant(locations->GetTemp(0), field_index);
443
444 bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
445 || field_access->IsUnresolvedInstanceFieldSet();
446 bool is_get = field_access->IsUnresolvedInstanceFieldGet()
447 || field_access->IsUnresolvedStaticFieldGet();
448
449 if (!is_get && Primitive::IsFloatingPointType(field_type)) {
450 // Copy the float value to be set into the calling convention register.
451 // Note that using directly the temp location is problematic as we don't
452 // support temp register pairs. To avoid boilerplate conversion code, use
453 // the location from the calling convention.
454 MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
455 locations->InAt(is_instance ? 1 : 0),
456 (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
457 }
458
459 QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
460 switch (field_type) {
461 case Primitive::kPrimBoolean:
462 entrypoint = is_instance
463 ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
464 : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
465 break;
466 case Primitive::kPrimByte:
467 entrypoint = is_instance
468 ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
469 : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
470 break;
471 case Primitive::kPrimShort:
472 entrypoint = is_instance
473 ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
474 : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
475 break;
476 case Primitive::kPrimChar:
477 entrypoint = is_instance
478 ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
479 : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
480 break;
481 case Primitive::kPrimInt:
482 case Primitive::kPrimFloat:
483 entrypoint = is_instance
484 ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
485 : (is_get ? kQuickGet32Static : kQuickSet32Static);
486 break;
487 case Primitive::kPrimNot:
488 entrypoint = is_instance
489 ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
490 : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
491 break;
492 case Primitive::kPrimLong:
493 case Primitive::kPrimDouble:
494 entrypoint = is_instance
495 ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
496 : (is_get ? kQuickGet64Static : kQuickSet64Static);
497 break;
498 default:
499 LOG(FATAL) << "Invalid type " << field_type;
500 }
501 InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
502
503 if (is_get && Primitive::IsFloatingPointType(field_type)) {
504 MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
505 }
506 }
507
508 void CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(HLoadClass* cls,
509 Location runtime_type_index_location,
510 Location runtime_return_location) {
511 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
512 DCHECK_EQ(cls->InputCount(), 1u);
513 LocationSummary* locations = new (cls->GetBlock()->GetGraph()->GetArena()) LocationSummary(
514 cls, LocationSummary::kCallOnMainOnly);
515 locations->SetInAt(0, Location::NoLocation());
516 locations->AddTemp(runtime_type_index_location);
517 locations->SetOut(runtime_return_location);
518 }
519
520 void CodeGenerator::GenerateLoadClassRuntimeCall(HLoadClass* cls) {
521 DCHECK_EQ(cls->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
522 LocationSummary* locations = cls->GetLocations();
523 MoveConstant(locations->GetTemp(0), cls->GetTypeIndex().index_);
524 if (cls->NeedsAccessCheck()) {
525 CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
526 InvokeRuntime(kQuickInitializeTypeAndVerifyAccess, cls, cls->GetDexPc());
527 } else if (cls->MustGenerateClinitCheck()) {
528 CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
529 InvokeRuntime(kQuickInitializeStaticStorage, cls, cls->GetDexPc());
530 } else {
531 CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
532 InvokeRuntime(kQuickInitializeType, cls, cls->GetDexPc());
533 }
534 }
535
536 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
537 // The DCHECKs below check that a register is not specified twice in
538 // the summary. The out location can overlap with an input, so we need
539 // to special case it.
540 if (location.IsRegister()) {
541 DCHECK(is_out || !blocked_core_registers_[location.reg()]);
542 blocked_core_registers_[location.reg()] = true;
543 } else if (location.IsFpuRegister()) {
544 DCHECK(is_out || !blocked_fpu_registers_[location.reg()]);
545 blocked_fpu_registers_[location.reg()] = true;
546 } else if (location.IsFpuRegisterPair()) {
547 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()]);
548 blocked_fpu_registers_[location.AsFpuRegisterPairLow<int>()] = true;
549 DCHECK(is_out || !blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()]);
550 blocked_fpu_registers_[location.AsFpuRegisterPairHigh<int>()] = true;
551 } else if (location.IsRegisterPair()) {
552 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairLow<int>()]);
553 blocked_core_registers_[location.AsRegisterPairLow<int>()] = true;
554 DCHECK(is_out || !blocked_core_registers_[location.AsRegisterPairHigh<int>()]);
555 blocked_core_registers_[location.AsRegisterPairHigh<int>()] = true;
556 }
557 }
558
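// Runs the location builder for one instruction and updates the leaf and
// current-method requirements of the graph based on the resulting locations.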
559 void CodeGenerator::AllocateLocations(HInstruction* instruction) {
560 instruction->Accept(GetLocationBuilder());
561 DCHECK(CheckTypeConsistency(instruction));
562 LocationSummary* locations = instruction->GetLocations();
563 if (!instruction->IsSuspendCheckEntry()) {
564 if (locations != nullptr) {
565 if (locations->CanCall()) {
566 MarkNotLeaf();
567 } else if (locations->Intrinsified() &&
568 instruction->IsInvokeStaticOrDirect() &&
569 !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) {
570 // A static method call that has been fully intrinsified, and cannot call on the slow
571 // path or refer to the current method directly, no longer needs the current method.
572 return;
573 }
574 }
575 if (instruction->NeedsCurrentMethod()) {
576 SetRequiresCurrentMethod();
577 }
578 }
579 }
580
581 void CodeGenerator::MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count) const {
582 if (stats_ != nullptr) {
583 stats_->RecordStat(compilation_stat, count);
584 }
585 }
586
587 std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
588 InstructionSet instruction_set,
589 const InstructionSetFeatures& isa_features,
590 const CompilerOptions& compiler_options,
591 OptimizingCompilerStats* stats) {
592 ArenaAllocator* arena = graph->GetArena();
593 switch (instruction_set) {
594 #ifdef ART_ENABLE_CODEGEN_arm
595 case kArm:
596 case kThumb2: {
597 if (kArmUseVIXL32) {
598 return std::unique_ptr<CodeGenerator>(
599 new (arena) arm::CodeGeneratorARMVIXL(graph,
600 *isa_features.AsArmInstructionSetFeatures(),
601 compiler_options,
602 stats));
603 } else {
604 return std::unique_ptr<CodeGenerator>(
605 new (arena) arm::CodeGeneratorARM(graph,
606 *isa_features.AsArmInstructionSetFeatures(),
607 compiler_options,
608 stats));
609 }
610 }
611 #endif
612 #ifdef ART_ENABLE_CODEGEN_arm64
613 case kArm64: {
614 return std::unique_ptr<CodeGenerator>(
615 new (arena) arm64::CodeGeneratorARM64(graph,
616 *isa_features.AsArm64InstructionSetFeatures(),
617 compiler_options,
618 stats));
619 }
620 #endif
621 #ifdef ART_ENABLE_CODEGEN_mips
622 case kMips: {
623 return std::unique_ptr<CodeGenerator>(
624 new (arena) mips::CodeGeneratorMIPS(graph,
625 *isa_features.AsMipsInstructionSetFeatures(),
626 compiler_options,
627 stats));
628 }
629 #endif
630 #ifdef ART_ENABLE_CODEGEN_mips64
631 case kMips64: {
632 return std::unique_ptr<CodeGenerator>(
633 new (arena) mips64::CodeGeneratorMIPS64(graph,
634 *isa_features.AsMips64InstructionSetFeatures(),
635 compiler_options,
636 stats));
637 }
638 #endif
639 #ifdef ART_ENABLE_CODEGEN_x86
640 case kX86: {
641 return std::unique_ptr<CodeGenerator>(
642 new (arena) x86::CodeGeneratorX86(graph,
643 *isa_features.AsX86InstructionSetFeatures(),
644 compiler_options,
645 stats));
646 }
647 #endif
648 #ifdef ART_ENABLE_CODEGEN_x86_64
649 case kX86_64: {
650 return std::unique_ptr<CodeGenerator>(
651 new (arena) x86_64::CodeGeneratorX86_64(graph,
652 *isa_features.AsX86_64InstructionSetFeatures(),
653 compiler_options,
654 stats));
655 }
656 #endif
657 default:
658 return nullptr;
659 }
660 }
661
662 void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size,
663 size_t* method_info_size) {
664 DCHECK(stack_map_size != nullptr);
665 DCHECK(method_info_size != nullptr);
666 *stack_map_size = stack_map_stream_.PrepareForFillIn();
667 *method_info_size = stack_map_stream_.ComputeMethodInfoSize();
668 }
669
670 static void CheckCovers(uint32_t dex_pc,
671 const HGraph& graph,
672 const CodeInfo& code_info,
673 const ArenaVector<HSuspendCheck*>& loop_headers,
674 ArenaVector<size_t>* covered) {
675 CodeInfoEncoding encoding = code_info.ExtractEncoding();
676 for (size_t i = 0; i < loop_headers.size(); ++i) {
677 if (loop_headers[i]->GetDexPc() == dex_pc) {
678 if (graph.IsCompilingOsr()) {
679 DCHECK(code_info.GetOsrStackMapForDexPc(dex_pc, encoding).IsValid());
680 }
681 ++(*covered)[i];
682 }
683 }
684 }
685
686 // Debug helper to ensure loop entries in compiled code are matched by
687 // dex branch instructions.
688 static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
689 const CodeInfo& code_info,
690 const DexFile::CodeItem& code_item) {
691 if (graph.HasTryCatch()) {
692 // One can write loops through try/catch, which we do not support for OSR anyway.
693 return;
694 }
695 ArenaVector<HSuspendCheck*> loop_headers(graph.GetArena()->Adapter(kArenaAllocMisc));
696 for (HBasicBlock* block : graph.GetReversePostOrder()) {
697 if (block->IsLoopHeader()) {
698 HSuspendCheck* suspend_check = block->GetLoopInformation()->GetSuspendCheck();
699 if (!suspend_check->GetEnvironment()->IsFromInlinedInvoke()) {
700 loop_headers.push_back(suspend_check);
701 }
702 }
703 }
704 ArenaVector<size_t> covered(loop_headers.size(), 0, graph.GetArena()->Adapter(kArenaAllocMisc));
705 const uint16_t* code_ptr = code_item.insns_;
706 const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
707
708 size_t dex_pc = 0;
709 while (code_ptr < code_end) {
710 const Instruction& instruction = *Instruction::At(code_ptr);
711 if (instruction.IsBranch()) {
712 uint32_t target = dex_pc + instruction.GetTargetOffset();
713 CheckCovers(target, graph, code_info, loop_headers, &covered);
714 } else if (instruction.IsSwitch()) {
715 DexSwitchTable table(instruction, dex_pc);
716 uint16_t num_entries = table.GetNumEntries();
717 size_t offset = table.GetFirstValueIndex();
718
719 // Use a larger loop counter type to avoid overflow issues.
720 for (size_t i = 0; i < num_entries; ++i) {
721 // The target of the case.
722 uint32_t target = dex_pc + table.GetEntryAt(i + offset);
723 CheckCovers(target, graph, code_info, loop_headers, &covered);
724 }
725 }
726 dex_pc += instruction.SizeInCodeUnits();
727 code_ptr += instruction.SizeInCodeUnits();
728 }
729
730 for (size_t i = 0; i < covered.size(); ++i) {
731 DCHECK_NE(covered[i], 0u) << "Loop in compiled code has no dex branch equivalent";
732 }
733 }
734
735 void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
736 MemoryRegion method_info_region,
737 const DexFile::CodeItem& code_item) {
738 stack_map_stream_.FillInCodeInfo(stack_map_region);
739 stack_map_stream_.FillInMethodInfo(method_info_region);
740 if (kIsDebugBuild) {
741 CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item);
742 }
743 }
744
745 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
746 uint32_t dex_pc,
747 SlowPathCode* slow_path) {
748 if (instruction != nullptr) {
749 // The code generated for some type conversions
750 // may call the runtime, thus normally requiring a subsequent
751 // call to this method. However, the method verifier does not
752 // produce PC information for certain instructions, which are
753 // considered "atomic" (they cannot join a GC).
754 // Therefore we do not currently record PC information for such
755 // instructions. As this may change later, we added this special
756 // case so that code generators may nevertheless call
757 // CodeGenerator::RecordPcInfo without triggering an error in
758 // CodeGenerator::BuildNativeGCMap ("Missing ref for dex pc 0x")
759 // thereafter.
760 if (instruction->IsTypeConversion()) {
761 return;
762 }
763 if (instruction->IsRem()) {
764 Primitive::Type type = instruction->AsRem()->GetResultType();
765 if ((type == Primitive::kPrimFloat) || (type == Primitive::kPrimDouble)) {
766 return;
767 }
768 }
769 }
770
771 uint32_t outer_dex_pc = dex_pc;
772 uint32_t outer_environment_size = 0;
773 uint32_t inlining_depth = 0;
774 if (instruction != nullptr) {
775 for (HEnvironment* environment = instruction->GetEnvironment();
776 environment != nullptr;
777 environment = environment->GetParent()) {
778 outer_dex_pc = environment->GetDexPc();
779 outer_environment_size = environment->Size();
780 if (environment != instruction->GetEnvironment()) {
781 inlining_depth++;
782 }
783 }
784 }
785
786 // Collect PC infos for the mapping table.
787 uint32_t native_pc = GetAssembler()->CodePosition();
788
789 if (instruction == nullptr) {
790 // For stack overflow checks and native-debug-info entries without dex register
791 // mapping (i.e. start of basic block or start of slow path).
792 stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
793 stack_map_stream_.EndStackMapEntry();
794 return;
795 }
796 LocationSummary* locations = instruction->GetLocations();
797
798 uint32_t register_mask = locations->GetRegisterMask();
799 DCHECK_EQ(register_mask & ~locations->GetLiveRegisters()->GetCoreRegisters(), 0u);
800 if (locations->OnlyCallsOnSlowPath()) {
801 // In case of slow path, we currently set the location of caller-save registers
802 // to register (instead of their stack location when pushed before the slow-path
803 // call). Therefore register_mask contains both callee-save and caller-save
804 // registers that hold objects. We must remove the spilled caller-save from the
805 // mask, since they will be overwritten by the callee.
806 uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
807 register_mask &= ~spills;
808 } else {
809 // The register mask must be a subset of callee-save registers.
810 DCHECK_EQ(register_mask & core_callee_save_mask_, register_mask);
811 }
812 stack_map_stream_.BeginStackMapEntry(outer_dex_pc,
813 native_pc,
814 register_mask,
815 locations->GetStackMask(),
816 outer_environment_size,
817 inlining_depth);
818
819 HEnvironment* const environment = instruction->GetEnvironment();
820 EmitEnvironment(environment, slow_path);
821 // Record invoke info; the common case for the trampoline is super and static invokes. Only
822 // record these to reduce oat file size.
823 if (kEnableDexLayoutOptimizations) {
824 if (environment != nullptr &&
825 instruction->IsInvoke() &&
826 instruction->IsInvokeStaticOrDirect()) {
827 HInvoke* const invoke = instruction->AsInvoke();
828 stack_map_stream_.AddInvoke(invoke->GetInvokeType(), invoke->GetDexMethodIndex());
829 }
830 }
831 stack_map_stream_.EndStackMapEntry();
832
833 HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
834 if (instruction->IsSuspendCheck() &&
835 (info != nullptr) &&
836 graph_->IsCompilingOsr() &&
837 (inlining_depth == 0)) {
838 DCHECK_EQ(info->GetSuspendCheck(), instruction);
839 // We duplicate the stack map as a marker that this stack map can be an OSR entry.
840 // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
841 DCHECK(info->IsIrreducible());
842 stack_map_stream_.BeginStackMapEntry(
843 dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
844 EmitEnvironment(instruction->GetEnvironment(), slow_path);
845 stack_map_stream_.EndStackMapEntry();
846 if (kIsDebugBuild) {
847 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
848 HInstruction* in_environment = environment->GetInstructionAt(i);
849 if (in_environment != nullptr) {
850 DCHECK(in_environment->IsPhi() || in_environment->IsConstant());
851 Location location = environment->GetLocationAt(i);
852 DCHECK(location.IsStackSlot() ||
853 location.IsDoubleStackSlot() ||
854 location.IsConstant() ||
855 location.IsInvalid());
856 if (location.IsStackSlot() || location.IsDoubleStackSlot()) {
857 DCHECK_LT(location.GetStackIndex(), static_cast<int32_t>(GetFrameSize()));
858 }
859 }
860 }
861 }
862 } else if (kIsDebugBuild) {
863 // Ensure stack maps are unique, by checking that the native pc in the stack map
864 // last emitted is different from the native pc of the stack map just emitted.
865 size_t number_of_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
866 if (number_of_stack_maps > 1) {
867 DCHECK_NE(stack_map_stream_.GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
868 stack_map_stream_.GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
869 }
870 }
871 }
872
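// Returns whether the last emitted stack map is attached to the current native pc,
// i.e. whether emitting another stack map here would create a duplicate.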
873 bool CodeGenerator::HasStackMapAtCurrentPc() {
874 uint32_t pc = GetAssembler()->CodeSize();
875 size_t count = stack_map_stream_.GetNumberOfStackMaps();
876 if (count == 0) {
877 return false;
878 }
879 CodeOffset native_pc_offset = stack_map_stream_.GetStackMap(count - 1).native_pc_code_offset;
880 return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
881 }
882
883 void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
884 uint32_t dex_pc,
885 SlowPathCode* slow_path) {
886 if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
887 if (HasStackMapAtCurrentPc()) {
888 // Ensure that we do not collide with the stack map of the previous instruction.
889 GenerateNop();
890 }
891 RecordPcInfo(instruction, dex_pc, slow_path);
892 }
893 }
894
895 void CodeGenerator::RecordCatchBlockInfo() {
896 ArenaAllocator* arena = graph_->GetArena();
897
898 for (HBasicBlock* block : *block_order_) {
899 if (!block->IsCatchBlock()) {
900 continue;
901 }
902
903 uint32_t dex_pc = block->GetDexPc();
904 uint32_t num_vregs = graph_->GetNumberOfVRegs();
905 uint32_t inlining_depth = 0; // Inlining of catch blocks is not supported at the moment.
906 uint32_t native_pc = GetAddressOf(block);
907 uint32_t register_mask = 0; // Not used.
908
909 // The stack mask is not used, so we leave it empty.
910 ArenaBitVector* stack_mask =
911 ArenaBitVector::Create(arena, 0, /* expandable */ true, kArenaAllocCodeGenerator);
912
913 stack_map_stream_.BeginStackMapEntry(dex_pc,
914 native_pc,
915 register_mask,
916 stack_mask,
917 num_vregs,
918 inlining_depth);
919
920 HInstruction* current_phi = block->GetFirstPhi();
921 for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
922 while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
923 HInstruction* next_phi = current_phi->GetNext();
924 DCHECK(next_phi == nullptr ||
925 current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
926 << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
927 current_phi = next_phi;
928 }
929
930 if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
931 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
932 } else {
933 Location location = current_phi->GetLiveInterval()->ToLocation();
934 switch (location.GetKind()) {
935 case Location::kStackSlot: {
936 stack_map_stream_.AddDexRegisterEntry(
937 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
938 break;
939 }
940 case Location::kDoubleStackSlot: {
941 stack_map_stream_.AddDexRegisterEntry(
942 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
943 stack_map_stream_.AddDexRegisterEntry(
944 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
945 ++vreg;
946 DCHECK_LT(vreg, num_vregs);
947 break;
948 }
949 default: {
950 // All catch phis must be allocated to a stack slot.
951 LOG(FATAL) << "Unexpected kind " << location.GetKind();
952 UNREACHABLE();
953 }
954 }
955 }
956 }
957
958 stack_map_stream_.EndStackMapEntry();
959 }
960 }
961
962 void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
963 if (environment == nullptr) return;
964
965 if (environment->GetParent() != nullptr) {
966 // We emit the parent environment first.
967 EmitEnvironment(environment->GetParent(), slow_path);
968 stack_map_stream_.BeginInlineInfoEntry(environment->GetMethod(),
969 environment->GetDexPc(),
970 environment->Size(),
971 &graph_->GetDexFile());
972 }
973
974 // Walk over the environment, and record the location of dex registers.
975 for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
976 HInstruction* current = environment->GetInstructionAt(i);
977 if (current == nullptr) {
978 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
979 continue;
980 }
981
982 Location location = environment->GetLocationAt(i);
983 switch (location.GetKind()) {
984 case Location::kConstant: {
985 DCHECK_EQ(current, location.GetConstant());
986 if (current->IsLongConstant()) {
987 int64_t value = current->AsLongConstant()->GetValue();
988 stack_map_stream_.AddDexRegisterEntry(
989 DexRegisterLocation::Kind::kConstant, Low32Bits(value));
990 stack_map_stream_.AddDexRegisterEntry(
991 DexRegisterLocation::Kind::kConstant, High32Bits(value));
992 ++i;
993 DCHECK_LT(i, environment_size);
994 } else if (current->IsDoubleConstant()) {
995 int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
996 stack_map_stream_.AddDexRegisterEntry(
997 DexRegisterLocation::Kind::kConstant, Low32Bits(value));
998 stack_map_stream_.AddDexRegisterEntry(
999 DexRegisterLocation::Kind::kConstant, High32Bits(value));
1000 ++i;
1001 DCHECK_LT(i, environment_size);
1002 } else if (current->IsIntConstant()) {
1003 int32_t value = current->AsIntConstant()->GetValue();
1004 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
1005 } else if (current->IsNullConstant()) {
1006 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
1007 } else {
1008 DCHECK(current->IsFloatConstant()) << current->DebugName();
1009 int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
1010 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
1011 }
1012 break;
1013 }
1014
1015 case Location::kStackSlot: {
1016 stack_map_stream_.AddDexRegisterEntry(
1017 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1018 break;
1019 }
1020
1021 case Location::kDoubleStackSlot: {
1022 stack_map_stream_.AddDexRegisterEntry(
1023 DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
1024 stack_map_stream_.AddDexRegisterEntry(
1025 DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
1026 ++i;
1027 DCHECK_LT(i, environment_size);
1028 break;
1029 }
1030
1031 case Location::kRegister : {
1032 int id = location.reg();
1033 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
1034 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
1035 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1036 if (current->GetType() == Primitive::kPrimLong) {
1037 stack_map_stream_.AddDexRegisterEntry(
1038 DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
1039 ++i;
1040 DCHECK_LT(i, environment_size);
1041 }
1042 } else {
1043 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
1044 if (current->GetType() == Primitive::kPrimLong) {
1045 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
1046 ++i;
1047 DCHECK_LT(i, environment_size);
1048 }
1049 }
1050 break;
1051 }
1052
1053 case Location::kFpuRegister : {
1054 int id = location.reg();
1055 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
1056 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
1057 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1058 if (current->GetType() == Primitive::kPrimDouble) {
1059 stack_map_stream_.AddDexRegisterEntry(
1060 DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
1061 ++i;
1062 DCHECK_LT(i, environment_size);
1063 }
1064 } else {
1065 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
1066 if (current->GetType() == Primitive::kPrimDouble) {
1067 stack_map_stream_.AddDexRegisterEntry(
1068 DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
1069 ++i;
1070 DCHECK_LT(i, environment_size);
1071 }
1072 }
1073 break;
1074 }
1075
1076 case Location::kFpuRegisterPair : {
1077 int low = location.low();
1078 int high = location.high();
1079 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
1080 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
1081 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1082 } else {
1083 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
1084 }
1085 if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
1086 uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
1087 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1088 ++i;
1089 } else {
1090 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
1091 ++i;
1092 }
1093 DCHECK_LT(i, environment_size);
1094 break;
1095 }
1096
1097 case Location::kRegisterPair : {
1098 int low = location.low();
1099 int high = location.high();
1100 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
1101 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
1102 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1103 } else {
1104 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
1105 }
1106 if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
1107 uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
1108 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
1109 } else {
1110 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
1111 }
1112 ++i;
1113 DCHECK_LT(i, environment_size);
1114 break;
1115 }
1116
1117 case Location::kInvalid: {
1118 stack_map_stream_.AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
1119 break;
1120 }
1121
1122 default:
1123 LOG(FATAL) << "Unexpected kind " << location.GetKind();
1124 }
1125 }
1126
1127 if (environment->GetParent() != nullptr) {
1128 stack_map_stream_.EndInlineInfoEntry();
1129 }
1130 }
1131
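// An explicit HNullCheck can be elided when the next non-move instruction can fault
// implicitly on the same reference.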
1132 bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
1133 HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
1134
1135 return (first_next_not_move != nullptr)
1136 && first_next_not_move->CanDoImplicitNullCheckOn(null_check->InputAt(0));
1137 }
1138
1139 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
1140 if (!compiler_options_.GetImplicitNullChecks()) {
1141 return;
1142 }
1143
1144 // If we are from a static path, don't record the pc as we can't throw an NPE.
1145 // NB: having the checks here makes the code much less verbose in the arch
1146 // specific code generators.
1147 if (instr->IsStaticFieldSet() || instr->IsStaticFieldGet()) {
1148 return;
1149 }
1150
1151 if (!instr->CanDoImplicitNullCheckOn(instr->InputAt(0))) {
1152 return;
1153 }
1154
1155 // Find the first previous instruction which is not a move.
1156 HInstruction* first_prev_not_move = instr->GetPreviousDisregardingMoves();
1157
1158 // If the instruction is a null check it means that `instr` is the first user
1159 // and needs to record the pc.
1160 if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
1161 HNullCheck* null_check = first_prev_not_move->AsNullCheck();
1162 // TODO: The parallel moves modify the environment. Their changes need to be
1163 // reverted otherwise the stack maps at the throw point will not be correct.
1164 RecordPcInfo(null_check, null_check->GetDexPc());
1165 }
1166 }
1167
1168 LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
1169 RegisterSet caller_saves) {
1170 // Note: Using kNoCall allows the method to be treated as leaf (and eliminate the
1171 // HSuspendCheck from entry block). However, it will still get a valid stack frame
1172 // because the HNullCheck needs an environment.
1173 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
1174 // When throwing from a try block, we may need to retrieve dalvik registers from
1175 // physical registers and we also need to set up stack mask for GC. This is
1176 // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
1177 bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
1178 if (can_throw_into_catch_block) {
1179 call_kind = LocationSummary::kCallOnSlowPath;
1180 }
1181 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
1182 if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
1183 locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
1184 }
1185 DCHECK(!instruction->HasUses());
1186 return locations;
1187 }
1188
1189 void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
1190 if (compiler_options_.GetImplicitNullChecks()) {
1191 MaybeRecordStat(kImplicitNullCheckGenerated);
1192 GenerateImplicitNullCheck(instruction);
1193 } else {
1194 MaybeRecordStat(kExplicitNullCheckGenerated);
1195 GenerateExplicitNullCheck(instruction);
1196 }
1197 }
1198
1199 void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const {
1200 LocationSummary* locations = suspend_check->GetLocations();
1201 HBasicBlock* block = suspend_check->GetBlock();
1202 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == suspend_check);
1203 DCHECK(block->IsLoopHeader());
1204
1205 for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
1206 HInstruction* current = it.Current();
1207 LiveInterval* interval = current->GetLiveInterval();
1208 // We only need to clear bits of loop phis containing objects and allocated in a register.
1209 // Loop phis allocated on stack already have the object in the stack.
1210 if (current->GetType() == Primitive::kPrimNot
1211 && interval->HasRegister()
1212 && interval->HasSpillSlot()) {
1213 locations->ClearStackBit(interval->GetSpillSlot() / kVRegSize);
1214 }
1215 }
1216 }
1217
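// Resolves the two moves as a single parallel move so that overlapping source and
// destination locations are handled correctly.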
1218 void CodeGenerator::EmitParallelMoves(Location from1,
1219 Location to1,
1220 Primitive::Type type1,
1221 Location from2,
1222 Location to2,
1223 Primitive::Type type2) {
1224 HParallelMove parallel_move(GetGraph()->GetArena());
1225 parallel_move.AddMove(from1, to1, type1, nullptr);
1226 parallel_move.AddMove(from2, to2, type2, nullptr);
1227 GetMoveResolver()->EmitNativeCode(&parallel_move);
1228 }
1229
1230 void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
1231 HInstruction* instruction,
1232 SlowPathCode* slow_path) {
1233 // Ensure that the call kind indication given to the register allocator is
1234 // coherent with the runtime call generated.
1235 if (slow_path == nullptr) {
1236 DCHECK(instruction->GetLocations()->WillCall())
1237 << "instruction->DebugName()=" << instruction->DebugName();
1238 } else {
1239 DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
1240 << "instruction->DebugName()=" << instruction->DebugName()
1241 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1242 }
1243
1244 // Check that the GC side effect is set when required.
1245 // TODO: Reverse EntrypointCanTriggerGC
1246 if (EntrypointCanTriggerGC(entrypoint)) {
1247 if (slow_path == nullptr) {
1248 DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
1249 << "instruction->DebugName()=" << instruction->DebugName()
1250 << " instruction->GetSideEffects().ToString()="
1251 << instruction->GetSideEffects().ToString();
1252 } else {
1253 DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
1254 // When (non-Baker) read barriers are enabled, some instructions
1255 // use a slow path to emit a read barrier, which does not trigger
1256 // GC.
1257 (kEmitCompilerReadBarrier &&
1258 !kUseBakerReadBarrier &&
1259 (instruction->IsInstanceFieldGet() ||
1260 instruction->IsStaticFieldGet() ||
1261 instruction->IsArrayGet() ||
1262 instruction->IsLoadClass() ||
1263 instruction->IsLoadString() ||
1264 instruction->IsInstanceOf() ||
1265 instruction->IsCheckCast() ||
1266 (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
1267 << "instruction->DebugName()=" << instruction->DebugName()
1268 << " instruction->GetSideEffects().ToString()="
1269 << instruction->GetSideEffects().ToString()
1270 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1271 }
1272 } else {
1273 // The GC side effect is not required for the instruction. But the instruction might still have
1274 // it, for example if it calls other entrypoints requiring it.
1275 }
1276
1277 // Check the coherency of leaf information.
1278 DCHECK(instruction->IsSuspendCheck()
1279 || ((slow_path != nullptr) && slow_path->IsFatal())
1280 || instruction->GetLocations()->CanCall()
1281 || !IsLeafMethod())
1282 << instruction->DebugName() << ((slow_path != nullptr) ? slow_path->GetDescription() : "");
1283 }
1284
1285 void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
1286 SlowPathCode* slow_path) {
1287 DCHECK(instruction->GetLocations()->OnlyCallsOnSlowPath())
1288 << "instruction->DebugName()=" << instruction->DebugName()
1289 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1290 // Only the Baker read barrier marking slow path used by certain
1291 // instructions is expected to invoke the runtime without recording
1292 // PC-related information.
1293 DCHECK(kUseBakerReadBarrier);
1294 DCHECK(instruction->IsInstanceFieldGet() ||
1295 instruction->IsStaticFieldGet() ||
1296 instruction->IsArrayGet() ||
1297 instruction->IsArraySet() ||
1298 instruction->IsLoadClass() ||
1299 instruction->IsLoadString() ||
1300 instruction->IsInstanceOf() ||
1301 instruction->IsCheckCast() ||
1302 (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()) ||
1303 (instruction->IsInvokeStaticOrDirect() && instruction->GetLocations()->Intrinsified()))
1304 << "instruction->DebugName()=" << instruction->DebugName()
1305 << " slow_path->GetDescription()=" << slow_path->GetDescription();
1306 }
1307
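// Spills the caller-save registers live across this slow path into the slow-path
// register area, updating the stack mask for registers that hold objects.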
1308 void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1309 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1310
1311 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
1312 for (uint32_t i : LowToHighBits(core_spills)) {
1313 // If the register holds an object, update the stack mask.
1314 if (locations->RegisterContainsObject(i)) {
1315 locations->SetStackBit(stack_offset / kVRegSize);
1316 }
1317 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1318 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1319 saved_core_stack_offsets_[i] = stack_offset;
1320 stack_offset += codegen->SaveCoreRegister(stack_offset, i);
1321 }
1322
1323 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
1324 for (uint32_t i : LowToHighBits(fp_spills)) {
1325 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1326 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1327 saved_fpu_stack_offsets_[i] = stack_offset;
1328 stack_offset += codegen->SaveFloatingPointRegister(stack_offset, i);
1329 }
1330 }
1331
1332 void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
1333 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
1334
1335 const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
1336 for (uint32_t i : LowToHighBits(core_spills)) {
1337 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1338 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1339 stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
1340 }
1341
1342 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
1343 for (uint32_t i : LowToHighBits(fp_spills)) {
1344 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
1345 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
1346 stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
1347 }
1348 }
1349
1350 void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
1351 // Check to see if we have known failures that will cause us to have to bail out
1352 // to the runtime, and just generate the runtime call directly.
1353 HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
1354 HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
1355
1356 // The positions must be non-negative.
1357 if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
1358 (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
1359 // We will have to fail anyway.
1360 return;
1361 }
1362
1363 // The length must be >= 0.
1364 HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
1365 if (length != nullptr) {
1366 int32_t len = length->GetValue();
1367 if (len < 0) {
1368 // Just call as normal.
1369 return;
1370 }
1371 }
1372
1373 SystemArrayCopyOptimizations optimizations(invoke);
1374
1375 if (optimizations.GetDestinationIsSource()) {
1376 if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
1377 // We only support backward copying if source and destination are the same.
1378 return;
1379 }
1380 }
1381
1382 if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
1383 // We currently don't intrinsify primitive copying.
1384 return;
1385 }
1386
1387 ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
1388 LocationSummary* locations = new (allocator) LocationSummary(invoke,
1389 LocationSummary::kCallOnSlowPath,
1390 kIntrinsified);
1391 // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
1392 locations->SetInAt(0, Location::RequiresRegister());
1393 locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
1394 locations->SetInAt(2, Location::RequiresRegister());
1395 locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
1396 locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
1397
1398 locations->AddTemp(Location::RequiresRegister());
1399 locations->AddTemp(Location::RequiresRegister());
1400 locations->AddTemp(Location::RequiresRegister());
1401 }
1402
1403 uint32_t CodeGenerator::GetReferenceSlowFlagOffset() const {
1404 ScopedObjectAccess soa(Thread::Current());
1405 mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
1406 DCHECK(klass->IsInitialized());
1407 return klass->GetSlowPathFlagOffset().Uint32Value();
1408 }
1409
1410 uint32_t CodeGenerator::GetReferenceDisableFlagOffset() const {
1411 ScopedObjectAccess soa(Thread::Current());
1412 mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
1413 DCHECK(klass->IsInitialized());
1414 return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
1415 }
1416
1417 void CodeGenerator::EmitJitRoots(uint8_t* code,
1418 Handle<mirror::ObjectArray<mirror::Object>> roots,
1419 const uint8_t* roots_data) {
1420 DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
1421 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1422 size_t index = 0;
1423 for (auto& entry : jit_string_roots_) {
1424 // Update the `roots` with the string, and replace the address temporarily
1425 // stored to the index in the table.
1426 uint64_t address = entry.second;
1427 roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
1428 DCHECK(roots->Get(index) != nullptr);
1429 entry.second = index;
1430 // Ensure the string is strongly interned. This is a requirement on how the JIT
1431 // handles strings. b/32995596
1432 class_linker->GetInternTable()->InternStrong(
1433 reinterpret_cast<mirror::String*>(roots->Get(index)));
1434 ++index;
1435 }
1436 for (auto& entry : jit_class_roots_) {
1437 // Update the `roots` with the class, and replace the address temporarily
1438 // stored to the index in the table.
1439 uint64_t address = entry.second;
1440 roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
1441 DCHECK(roots->Get(index) != nullptr);
1442 entry.second = index;
1443 ++index;
1444 }
1445 EmitJitRootPatches(code, roots_data);
1446 }
1447
1448 QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
1449 ScopedObjectAccess soa(Thread::Current());
1450 if (array_klass == nullptr) {
1451 // This can only happen for non-primitive arrays, as primitive arrays can always
1452 // be resolved.
1453 return kQuickAllocArrayResolved32;
1454 }
1455
1456 switch (array_klass->GetComponentSize()) {
1457 case 1: return kQuickAllocArrayResolved8;
1458 case 2: return kQuickAllocArrayResolved16;
1459 case 4: return kQuickAllocArrayResolved32;
1460 case 8: return kQuickAllocArrayResolved64;
1461 }
1462 LOG(FATAL) << "Unreachable";
1463 return kQuickAllocArrayResolved;
1464 }
1465
1466 } // namespace art
1467