/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_

#include "code_generator.h"
#include "instruction_simplifier_shared.h"
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop

namespace art {

using helpers::CanFitInShifterOperand;
using helpers::HasShifterOperand;

namespace arm64 {
namespace helpers {

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

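// Note (illustrative): ART assigns SP/WSP code 31 and XZR/WZR code 32, whereas
// the AArch64 encoding used by VIXL gives the zero register code 31 and VIXL
// tracks SP with a separate internal code. The two helpers below translate
// between the schemes; all other core register codes pass through unchanged.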
inline int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::aarch64::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::aarch64::kZeroRegCode;
  }
  return code;
}

inline int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::aarch64::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::aarch64::kZeroRegCode) {
    return XZR;
  }
  return code;
}

inline vixl::aarch64::Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::XRegister(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::WRegister(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register RegisterFrom(Location location, DataType::Type type) {
  DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kInt64 ? XRegisterFrom(location) : WRegisterFrom(location);
}

inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::VRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::DRegister(location.reg());
}

inline vixl::aarch64::VRegister QRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::QRegister(location.reg());
}

inline vixl::aarch64::VRegister VRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::VRegister(location.reg());
}

inline vixl::aarch64::ZRegister ZRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::ZRegister(location.reg());
}

inline vixl::aarch64::VRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::SRegister(location.reg());
}

inline vixl::aarch64::VRegister HRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::HRegister(location.reg());
}

inline vixl::aarch64::VRegister FPRegisterFrom(Location location, DataType::Type type) {
  DCHECK(DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
}

inline vixl::aarch64::VRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::VRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, DataType::Type type) {
  return DataType::IsFloatingPointType(type)
      ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
      : vixl::aarch64::CPURegister(RegisterFrom(location, type));
}

inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
  return DataType::IsFloatingPointType(instr->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
      : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}

inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return DataType::IsFloatingPointType(instr->InputAt(index)->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}

inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
                                                              int index) {
  HInstruction* input = instr->InputAt(index);
  DataType::Type input_type = input->GetType();
  if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
    return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes)
        ? vixl::aarch64::Register(vixl::aarch64::xzr)
        : vixl::aarch64::Register(vixl::aarch64::wzr);
  }
  return InputCPURegisterAt(instr, index);
}
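
// Usage note (illustrative): InputCPURegisterOrZeroRegAt() lets the code
// generator read a zero-valued constant input directly from xzr/wzr instead of
// materializing it in an allocated register, e.g. a null reference store can
// be emitted as `str wzr, [obj, #offset]`.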

inline int64_t Int64FromLocation(Location location) {
  return Int64FromConstant(location.GetConstant());
}

inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) {
  if (location.IsRegister()) {
    return vixl::aarch64::Operand(RegisterFrom(location, type));
  } else {
    return vixl::aarch64::Operand(Int64FromLocation(location));
  }
}

inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
  return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::SVEMemOperand SveStackOperandFrom(Location location) {
  return vixl::aarch64::SVEMemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             size_t offset = 0) {
  // A heap reference must be 32 bits, so it fits in a W register; the base is
  // widened to X because AArch64 addressing requires a 64-bit base register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), offset);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             const vixl::aarch64::Register& regoffset,
                                             vixl::aarch64::Shift shift = vixl::aarch64::LSL,
                                             unsigned shift_amount = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, DataType::Type::kReference), offset);
}

inline Location LocationFrom(const vixl::aarch64::Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}

inline Location LocationFrom(const vixl::aarch64::VRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.GetCode());
}

inline Location LocationFrom(const vixl::aarch64::ZRegister& zreg) {
  return Location::FpuRegisterLocation(zreg.GetCode());
}

inline vixl::aarch64::Operand OperandFromMemOperand(
    const vixl::aarch64::MemOperand& mem_op) {
  if (mem_op.IsImmediateOffset()) {
    return vixl::aarch64::Operand(mem_op.GetOffset());
  } else {
    DCHECK(mem_op.IsRegisterOffset());
    if (mem_op.GetExtend() != vixl::aarch64::NO_EXTEND) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetExtend(),
                                    mem_op.GetShiftAmount());
    } else if (mem_op.GetShift() != vixl::aarch64::NO_SHIFT) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetShift(),
                                    mem_op.GetShiftAmount());
    } else {
      LOG(FATAL) << "Should not reach here";
      UNREACHABLE();
    }
  }
}
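
// (Note, assuming VIXL's MemOperand defaults: a plain register offset is
// constructed with shift LSL and amount 0, so the GetShift() branch above
// covers it; reaching the final LOG(FATAL) would indicate a malformed operand.)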

inline bool AddSubCanEncodeAsImmediate(int64_t value) {
  // If `value` does not fit but `-value` does, VIXL will automatically use
  // the 'opposite' instruction.
  return vixl::aarch64::Assembler::IsImmAddSub(value)
      || vixl::aarch64::Assembler::IsImmAddSub(-value);
}
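
// For example (illustrative): -1 is not a valid ADD immediate, since ADD/SUB
// take a 12-bit unsigned immediate optionally shifted left by 12, but 1 is,
// so VIXL can emit `Add(x0, x1, -1)` as `sub x0, x1, #1`.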

inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
  int64_t value = CodeGenerator::GetInt64ValueOf(constant);

  // TODO: Improve this when IsSIMDConstantEncodable method is implemented in VIXL.
  if (instr->IsVecReplicateScalar()) {
    if (constant->IsLongConstant()) {
      return false;
    } else if (constant->IsFloatConstant()) {
      return vixl::aarch64::Assembler::IsImmFP32(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsDoubleConstant()) {
      return vixl::aarch64::Assembler::IsImmFP64(constant->AsDoubleConstant()->GetValue());
    }
    return IsUint<8>(value);
  }

  // Code generation for Min/Max:
  //    Cmp left_op, right_op
  //    Csel dst, left_op, right_op, cond
  if (instr->IsMin() || instr->IsMax()) {
    if (constant->GetUses().HasExactlyOneElement()) {
      // If value can be encoded as immediate for the Cmp, then let VIXL handle
      // the constant generation for the Csel.
      return AddSubCanEncodeAsImmediate(value);
    }
    // These values are encodable as immediates for Cmp and VIXL will use csinc and csinv
    // with the zr register as right_op, hence no constant generation is required.
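    // (Illustrative: for Min(x, 1), VIXL can emit `cmp w1, #1` followed by
    // `csinc w0, w1, wzr, lt`, as csinc with wzr yields the constant 1.)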
    return constant->IsZeroBitPattern() || constant->IsOne() || constant->IsMinusOne();
  }

  // For single uses we let VIXL handle the constant generation since it will
  // use registers that are not managed by the register allocator (wip0, wip1).
  if (constant->GetUses().HasExactlyOneElement()) {
    return true;
  }

  // Our code generator ensures shift distances are within an encodable range.
  if (instr->IsRor()) {
    return true;
  }

  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
    // Uses logical operations.
    return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
  } else if (instr->IsNeg()) {
    // Uses mov -immediate.
    return vixl::aarch64::Assembler::IsImmMovn(value, vixl::aarch64::kXRegSize);
  } else {
    DCHECK(instr->IsAdd() ||
           instr->IsIntermediateAddress() ||
           instr->IsBoundsCheck() ||
           instr->IsCompare() ||
           instr->IsCondition() ||
           instr->IsSub())
        << instr->DebugName();
    // Uses aliases of ADD/SUB instructions.
    return AddSubCanEncodeAsImmediate(value);
  }
}
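
// Note (illustrative): encodability depends on the instruction class. A
// bitmask pattern such as 0x00ff00ff00ff00ff is a valid logical immediate for
// And/Or/Xor, whereas an arbitrary value like 0x12345678 is not and would
// need a register.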

inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
                                                 HInstruction* instr) {
  if (constant->IsConstant()
      && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant->AsConstant());
  }

  return Location::RequiresRegister();
}

// Check whether the registers in an ART register set have the same register
// codes in VIXL. If the codes are the same, we can initialize the VIXL register
// list directly from the register masks. Currently, only the SP/WSP and XZR/WZR
// codes differ between ART and VIXL.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
                                            size_t num_core,
                                            uint32_t art_fpu_registers,
                                            size_t num_fpu) {
  // The register masks won't work if the number of registers is larger than 32.
  DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
  DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
  for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
    if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
      if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
        return false;
      }
    }
  }
  // There is no register code translation for float registers.
  return true;
}
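
// (Illustrative: core register codes 0-30 are identical in both schemes, so a
// core mask is only incoherent when it contains SP, whose ART code 31 maps to
// VIXL's internal SP code.)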

inline vixl::aarch64::Shift ShiftFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
    case HDataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
    case HDataProcWithShifterOp::kLSR: return vixl::aarch64::LSR;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_SHIFT;
  }
}

inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
    case HDataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
    case HDataProcWithShifterOp::kUXTW: return vixl::aarch64::UXTW;
    case HDataProcWithShifterOp::kSXTB: return vixl::aarch64::SXTB;
    case HDataProcWithShifterOp::kSXTH: return vixl::aarch64::SXTH;
    case HDataProcWithShifterOp::kSXTW: return vixl::aarch64::SXTW;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_EXTEND;
  }
}
inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
  // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
  // does *not* support extension. This is because the `extended register` form
  // of the `sub` instruction interprets the left register with code 31 as the
  // stack pointer and not the zero register. (So does the `immediate` form.) In
  // the other form, `shifted register`, the register with code 31 is interpreted
  // as the zero register.
  return instruction->IsAdd() || instruction->IsSub();
}
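
// (Illustrative: in the extended-register form `sub x0, x1, w2, sxtw`, Rn = 31
// names SP; in the shifted-register form `sub x0, x1, x2, lsl #2`, Rn = 31
// names XZR. As `neg` is `sub` with the zero register as Rn, it only exists in
// the shifted-register form.)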

inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
  return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
}

}  // namespace helpers
}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_