1 /*
2  * Copyright (C) 2016 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
18 #define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
19 
20 #include <android-base/logging.h>
21 
22 #include "base/arena_containers.h"
23 #include "base/macros.h"
24 #include "constants_arm.h"
25 #include "offsets.h"
26 #include "utils/arm/assembler_arm_shared.h"
27 #include "utils/arm/managed_register_arm.h"
28 #include "utils/assembler.h"
29 #include "utils/jni_macro_assembler.h"
30 
31 // TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
32 #pragma GCC diagnostic push
33 #pragma GCC diagnostic ignored "-Wshadow"
34 #include "aarch32/macro-assembler-aarch32.h"
35 #pragma GCC diagnostic pop
36 
37 namespace vixl32 = vixl::aarch32;
38 
39 namespace art {
40 namespace arm {
41 
// ART-specific wrapper around the VIXL AArch32 macro assembler. Its main purpose is to
// prefer smaller (16-bit) T32 encodings by default: it overloads the data-processing macro
// instructions to pass vixl32::DontCare for the flags-update argument, and provides
// branch helpers that can pick compact encodings (Cbz/Cbnz, narrow B) when given a
// "near target" hint.
class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
  // fewer system calls than a larger default capacity.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
  // then Cbz/Cbnz is generated.
  // Prefer these interfaces over vixl32::MacroAssembler::Cbz/Cbnz.
  // In T32, Cbz/Cbnz instructions have the following limitations:
  // - Far targets, which are over 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);

  // In T32 some of the instructions (add, mov, etc) outside an IT block
  // have only 32-bit encodings. But there are 16-bit flag setting
  // versions of these instructions (adds, movs, etc). In most of the
  // cases in ART we don't care if the instructions keep flags or not;
  // thus we can benefit from smaller code size.
  // VIXL will never generate flag setting versions (for example, adds
  // for Add macro instruction) unless vixl32::DontCare option is
  // explicitly specified. That's why we introduce wrappers to use
  // DontCare option by default.
  // The `using` declaration keeps the base class overloads (e.g. the conditional and
  // explicit-FlagsUpdate forms) visible alongside the wrapper, instead of hiding them.
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

  // Same as above, for the two-operand (rd, operand) instruction shape.
#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP

  // The following two functions don't fall into above categories. Overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  // For the "rd == rn, plain register operand" case, delegate without DontCare and let VIXL
  // pick the encoding; otherwise request the DontCare (potentially narrow) form as above.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use 16-bit T2 encoding of B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try to use Narrow encoding, because 16-bit T2 encoding supports
  // jumping within a 2KB range. For B(cond, label), the supported branch range is only 256
  // bytes, so we use the is_far_target hint to decide whether to try the 16-bit T1 encoding
  // for short range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);

  // Use literal for generating double constant if it doesn't fit VMOV encoding.
  void Vmov(vixl32::DRegister rd, double imm) {
    if (vixl::VFP::IsImmFP64(imm)) {
      // The immediate fits the VMOV (immediate) encoding; emit it directly.
      MacroAssembler::Vmov(rd, imm);
    } else {
      // Otherwise load the constant from a literal pool entry.
      MacroAssembler::Vldr(rd, imm);
    }
  }
  using MacroAssembler::Vmov;
};
151 
// ART's Assembler implementation for 32-bit ARM, backed by an ArmVIXLMacroAssembler
// configured for the T32 (Thumb2) instruction set. Declarations only; definitions live
// in the corresponding .cc file.
class ArmVIXLAssembler final : public Assembler {
 private:
  // Forward declaration of a private nested type; defined in the implementation file.
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* allocator)
      : Assembler(allocator) {
    // Use Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  // Accessor for the underlying VIXL macro assembler, for code generators that need
  // to emit instructions directly.
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() override;

  // Size of generated code.
  size_t CodeSize() const override;
  const uint8_t* CodeBufferBaseAddress() const override;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region) override;

  // ART's generic Label is not used on ARM; use vixl32::Label via GetVIXLAssembler() instead.
  void Bind(Label* label ATTRIBUTE_UNUSED) override {
    UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) override {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
  }

  //
  // Heap poisoning.
  //

  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Emit code checking the status of the Marking Register, and aborting
  // the program if MR does not match the value stored in the art::Thread
  // object.
  //
  // Argument `temp` is used as a temporary register to generate code.
  // Argument `code` is used to identify the different occurrences of
  // MaybeGenerateMarkingRegisterCheck and is passed to the BKPT instruction.
  void GenerateMarkingRegisterCheck(vixl32::Register temp, int code = 0);

  // Store/load helpers between a register and memory at [base + offset].
  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  void LoadImmediate(vixl32::Register dest, int32_t value);
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  // Bulk load/store of a register list relative to SP at the given stack offset.
  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  // Queries about which immediates/offsets the instruction encodings can represent,
  // and helpers to split or adjust offsets that do not fit directly.
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode,
                             uint32_t immediate,
                             vixl::aarch32::FlagsUpdate update_flags = vixl::aarch32::DontCare);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  // Creates a literal that is placed when first used and whose lifetime is tied to the
  // VIXL literal pool (kDeletedOnPoolDestruction) — callers must not delete it.
  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
255 
256 // Thread register declaration.
257 extern const vixl32::Register tr;
258 // Marking register declaration.
259 extern const vixl32::Register mr;
260 
261 }  // namespace arm
262 }  // namespace art
263 
264 #endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
265