1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
18 #define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
19
20 #include <android-base/logging.h>
21
22 #include "base/macros.h"
23 #include "constants_arm.h"
24 #include "dwarf/register.h"
25 #include "offsets.h"
26 #include "utils/arm/managed_register_arm.h"
27 #include "utils/assembler.h"
28
29 // TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
30 #pragma GCC diagnostic push
31 #pragma GCC diagnostic ignored "-Wshadow"
32 #include "aarch32/macro-assembler-aarch32.h"
33 #pragma GCC diagnostic pop
34
35 namespace vixl32 = vixl::aarch32;
36
37 namespace art HIDDEN {
38 namespace arm {
39
DWARFReg(vixl32::Register reg)40 inline dwarf::Reg DWARFReg(vixl32::Register reg) {
41 return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
42 }
43
DWARFReg(vixl32::SRegister reg)44 inline dwarf::Reg DWARFReg(vixl32::SRegister reg) {
45 return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
46 }
47
// Kind of memory load; selects the access size, sign/zero extension and
// register class (core vs. VFP) in LoadFromOffset() and related helpers.
// Do not reorder: enumerator values may be relied upon elsewhere.
enum LoadOperandType {
  kLoadSignedByte,        // Byte load, sign-extended to 32 bits.
  kLoadUnsignedByte,      // Byte load, zero-extended to 32 bits.
  kLoadSignedHalfword,    // Halfword load, sign-extended to 32 bits.
  kLoadUnsignedHalfword,  // Halfword load, zero-extended to 32 bits.
  kLoadWord,              // 32-bit word load into a core register.
  kLoadWordPair,          // 64-bit load into a pair of core registers.
  kLoadSWord,             // 32-bit load into a single-precision VFP register.
  kLoadDWord              // 64-bit load into a double-precision VFP register.
};
58
// Kind of memory store; selects the access size and register class
// (core vs. VFP) in StoreToOffset() and related helpers.
// Do not reorder: enumerator values may be relied upon elsewhere.
enum StoreOperandType {
  kStoreByte,      // Byte store (low 8 bits of a core register).
  kStoreHalfword,  // Halfword store (low 16 bits of a core register).
  kStoreWord,      // 32-bit word store from a core register.
  kStoreWordPair,  // 64-bit store from a pair of core registers.
  kStoreSWord,     // 32-bit store from a single-precision VFP register.
  kStoreDWord      // 64-bit store from a double-precision VFP register.
};
67
// ART's specialization of the VIXL AArch32 macro assembler. It wraps selected
// macro instructions so that the flag-agnostic (vixl32::DontCare) forms are
// used by default, which lets VIXL pick smaller 16-bit T32 encodings, and it
// adds branch helpers that prefer compact encodings when a range hint allows.
class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
  // fewer system calls than a larger default capacity.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
  // then Cbz/Cbnz is generated.
  // Prefer following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz.
  // In T32, Cbz/Cbnz instructions have following limitations:
  // - Far targets, which are over 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);

  // In T32 some of the instructions (add, mov, etc) outside an IT block
  // have only 32-bit encodings. But there are 16-bit flag setting
  // versions of these instructions (adds, movs, etc). In most of the
  // cases in ART we don't care if the instructions keep flags or not;
  // thus we can benefit from smaller code size.
  // VIXL will never generate flag setting versions (for example, adds
  // for Add macro instruction) unless vixl32::DontCare option is
  // explicitly specified. That's why we introduce wrappers to use
  // DontCare option by default.
  // Each expansion defines a (rd, rn, operand) overload forwarding to the
  // DontCare form, and re-exports the base-class overloads via `using`.
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

  // Same as above for two-operand (rd, operand) macro instructions.
#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP

  // The following two functions don't fall into above categories. Overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    // For the in-place register-register form, let VIXL decide about flags;
    // otherwise force DontCare to allow the narrow encoding.
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use 16-bit T2 encoding of B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try to use Narrow encoding, because 16-bit T2 encoding supports
  // jumping within 2KB range. For B(cond, label), because the supported branch range is 256
  // bytes; we use the far_target hint to try to use 16-bit T1 encoding for short range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);

  // Use literal for generating double constant if it doesn't fit VMOV encoding.
  void Vmov(vixl32::DRegister rd, double imm) {
    if (vixl::VFP::IsImmFP64(imm)) {
      MacroAssembler::Vmov(rd, imm);
    } else {
      MacroAssembler::Vldr(rd, imm);
    }
  }
  using MacroAssembler::Vmov;

  // TODO(b/281982421): Move the implementation of Mrrc to vixl and remove this implementation.
  void Mrrc(vixl32::Register r1, vixl32::Register r2, int coproc, int opc1, int crm) {
    // See ARM A-profile A32/T32 Instruction set architecture
    // https://developer.arm.com/documentation/ddi0597/2022-09/Base-Instructions/MRRC--Move-to-two-general-purpose-registers-from-System-register-
    // Only coprocessors 14 and 15 are accepted here.
    CHECK(coproc == 15 || coproc == 14);
    // NOTE(review): opc1 and crm are assumed to fit in 4 bits; larger values
    // would silently overflow into the adjacent encoding fields below.
    if (IsUsingT32()) {
      // T32 encoding: fixed bits | Rt2[19:16] | Rt[15:12] | coproc[11:8] |
      // opc1[7:4] | CRm[3:0].
      uint32_t inst = (0b111011000101 << 20) |
          (r2.GetCode() << 16) |
          (r1.GetCode() << 12) |
          (coproc << 8) |
          (opc1 << 4) |
          crm;
      EmitT32_32(inst);
    } else {
      // A32 encoding: same field layout, different fixed opcode bits
      // (condition field is zero here).
      uint32_t inst = (0b000011000101 << 20) |
          (r2.GetCode() << 16) |
          (r1.GetCode() << 12) |
          (coproc << 8) |
          (opc1 << 4) |
          crm;
      EmitA32(inst);
    }
  }
};
201
// ART's top-level ARM assembler, implementing the common Assembler interface
// on top of an ArmVIXLMacroAssembler. Always emits T32 (Thumb2) code.
class ArmVIXLAssembler final : public Assembler {
 private:
  // Forward declaration only; the definition lives in the implementation file.
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* allocator)
      : Assembler(allocator) {
    // Use Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  // Direct access to the underlying VIXL macro assembler.
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() override;

  // Size of generated code.
  size_t CodeSize() const override;
  const uint8_t* CodeBufferBaseAddress() const override;

  // Copy instructions out of assembly buffer into the given region of memory.
  void CopyInstructions(const MemoryRegion& region) override;

  // The generic art::Label overloads are not supported on ARM; use the
  // vixl::aarch32::Label overloads below instead.
  void Bind([[maybe_unused]] Label* label) override {
    UNIMPLEMENTED(FATAL) << "Do not use Bind(Label*) for ARM";
  }
  void Jump([[maybe_unused]] Label* label) override {
    UNIMPLEMENTED(FATAL) << "Do not use Jump(Label*) for ARM";
  }

  // Bind a VIXL label at the current position in the code buffer.
  void Bind(vixl::aarch32::Label* label) {
    vixl_masm_.Bind(label);
  }
  // Emit an unconditional branch to a VIXL label.
  void Jump(vixl::aarch32::Label* label) {
    vixl_masm_.B(label);
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Emit code checking the status of the Marking Register, and aborting
  // the program if MR does not match the value stored in the art::Thread
  // object.
  //
  // Argument `temp` is used as a temporary register to generate code.
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck and is passed to the BKPT instruction.
  void GenerateMarkingRegisterCheck(vixl32::Register temp, int code = 0);

  // Store `reg` to memory at `base` + `offset`; `type` selects the access
  // size and register class (see StoreOperandType).
  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  // Load a 32-bit immediate into `dest`.
  void LoadImmediate(vixl32::Register dest, int32_t value);
  // Load `reg` from memory at `base` + `offset`; `type` selects the access
  // size, extension and register class (see LoadOperandType).
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  // Load/store each register in `regs` from/to consecutive stack slots
  // starting at `stack_offset`.
  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  // Whether `immediate` can be encoded as a shifter operand for any
  // data-processing instruction.
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  // Whether `immediate` can be encoded as a shifter operand for the given
  // `opcode` (with the given flags-update behavior).
  bool ShifterOperandCanHold(Opcode opcode,
                             uint32_t immediate,
                             vixl::aarch32::FlagsUpdate update_flags = vixl::aarch32::DontCare);
  // Whether `offset` can be split into a base adjustment (`add_to_base`) plus
  // a load/store immediate fitting in `allowed_offset_bits`.
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  // Materialize an out-of-range `offset` into `temp` and return the residual
  // offset to use with the load/store instruction.
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  // Number of offset bits the load/store encoding for `type` allows.
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  // Add a 32-bit constant: rd = rd + value / rd = rn + value.
  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  // As above, but emitted inside an IT block under condition `cond`.
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  // Create a literal that is placed when first used and owned (deleted) by
  // the macro assembler's literal pool.
  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
312
313 // Thread register declaration.
314 extern const vixl32::Register tr;
315 // Marking register declaration.
316 extern const vixl32::Register mr;
317
318 } // namespace arm
319 } // namespace art
320
321 #endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
322